| problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens_prompt | num_tokens_diff |
|---|---|---|---|---|---|---|---|---|
| stringlengths 18-22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13-58 | stringlengths 1.71k-18.9k | stringlengths 145-5.13k | stringlengths 465-23.6k | int64 556-4.1k | int64 47-1.02k |
gh_patches_debug_282 | rasdani/github-patches | git_diff | opendatacube__datacube-core-1331 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Code includes Type Annotations, but they're not made available for type checking (PEP561)
**Summary**
The ODC code is fairly well annotated with [Python type hints](https://typing.readthedocs.io/en/latest/), but these type hints aren't made available for use in downstream packages by following [PEP 561 – Distributing and Packaging Type Information | peps.python.org](https://peps.python.org/pep-0561/).
**Proposal**
Since ODC Core includes inline type hints with the code, we need to follow [packaging type information - PEP561](https://peps.python.org/pep-0561/#packaging-type-information) by adding an empty `datacube/py.typed` file, and ensuring it's distributed by adding it to `package_data` in `setup.py`.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from setuptools import setup, find_packages
4
5 tests_require = [
6 'hypothesis',
7 'pycodestyle',
8 'pylint',
9 'pytest',
10 'pytest-cov',
11 'pytest-timeout',
12 'pytest-httpserver',
13 'moto',
14 ]
15 doc_require = [
16 'Sphinx',
17 'sphinx_rtd_theme',
18 'sphinx_autodoc_typehints', # Propagate mypy info into docs
19 'sphinx-click',
20 'recommonmark',
21 'setuptools', # version related dependencies
22 'setuptools_scm[toml]',
23 ]
24
25 extras_require = {
26 'performance': ['ciso8601', 'bottleneck'],
27 'distributed': ['distributed', 'dask[distributed]'],
28 'doc': doc_require,
29 's3': ['boto3', 'botocore'],
30 'test': tests_require,
31 'cf': ['compliance-checker>=4.0.0'],
32 }
33
34 extras_require['dev'] = sorted(set(sum([extras_require[k] for k in [
35 'test',
36 'doc',
37 'performance',
38 's3',
39 'distributed',
40 ]], [])))
41
42 # An 'all' option, following ipython naming conventions.
43 extras_require['all'] = sorted(set(sum(extras_require.values(), [])))
44
45 extra_plugins = dict(read=[], write=[], index=[])
46
47 setup(
48 name='datacube',
49 python_requires='>=3.8.0',
50
51 url='https://github.com/opendatacube/datacube-core',
52 author='Open Data Cube',
53 maintainer='Open Data Cube',
54 maintainer_email='',
55 description='An analysis environment for satellite and other earth observation data',
56 long_description=open('README.rst').read(),
57 long_description_content_type='text/x-rst',
58 license='Apache License 2.0',
59 classifiers=[
60 "Development Status :: 4 - Beta",
61 "Intended Audience :: Developers",
62 "Intended Audience :: Science/Research",
63 "License :: OSI Approved :: Apache Software License",
64 "Natural Language :: English",
65 "Operating System :: MacOS :: MacOS X",
66 "Operating System :: POSIX",
67 "Operating System :: POSIX :: BSD",
68 "Operating System :: POSIX :: Linux",
69 "Operating System :: Microsoft :: Windows",
70 "Programming Language :: Python",
71 "Programming Language :: Python :: 3",
72 "Programming Language :: Python :: 3.8",
73 "Programming Language :: Python :: 3.9",
74 "Programming Language :: Python :: 3.10",
75 "Topic :: Scientific/Engineering :: GIS",
76 "Topic :: Scientific/Engineering :: Information Analysis",
77 ],
78
79 packages=find_packages(
80 exclude=('tests', 'tests.*',
81 'integration_tests', 'integration_tests.*')
82 ),
83 package_data={
84 '': ['*.yaml', '*/*.yaml'],
85 },
86 scripts=[],
87 install_requires=[
88 'affine',
89 'pyproj>=2.5',
90 'shapely>=1.6.4',
91 'cachetools',
92 'click>=5.0',
93 'cloudpickle>=0.4',
94 'dask[array]',
95 'distributed',
96 'jsonschema',
97 'netcdf4',
98 'numpy',
99 'psycopg2',
100 'lark',
101 'pandas',
102 'python-dateutil',
103 'pyyaml',
104 'rasterio>=1.3.2', # Warping broken in 1.3.0 and 1.3.1
105 'sqlalchemy',
106 'GeoAlchemy2',
107 'toolz',
108 'xarray>=0.9,!=2022.6.0', # >0.9 fixes most problems with `crs` attributes being lost
109 ],
110 extras_require=extras_require,
111 tests_require=tests_require,
112
113 entry_points={
114 'console_scripts': [
115 'datacube = datacube.scripts.cli_app:cli',
116 'datacube-search = datacube.scripts.search_tool:cli',
117 'datacube-worker = datacube.execution.worker:main',
118 ],
119 'datacube.plugins.io.read': [
120 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',
121 *extra_plugins['read'],
122 ],
123 'datacube.plugins.io.write': [
124 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',
125 *extra_plugins['write'],
126 ],
127 'datacube.plugins.index': [
128 'default = datacube.index.postgres.index:index_driver_init',
129 'null = datacube.index.null.index:index_driver_init',
130 'memory = datacube.index.memory.index:index_driver_init',
131 'postgis = datacube.index.postgis.index:index_driver_init',
132 *extra_plugins['index'],
133 ],
134 },
135 )
136
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -82,6 +82,7 @@
),
package_data={
'': ['*.yaml', '*/*.yaml'],
+ 'datacube': ['py.typed'],
},
scripts=[],
install_requires=[
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -82,6 +82,7 @@\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n+ 'datacube': ['py.typed'],\n },\n scripts=[],\n install_requires=[\n", "issue": "Code includes Type Annotations, but they're not made available for type checking (PEP561)\n**Summary**\r\n\r\nThe ODC code is fairly well annotated with [Python type hints](https://typing.readthedocs.io/en/latest/), but these type hints aren't made availble for use in downstream packages by following [PEP 561 \u2013 Distributing and Packaging Type Information | peps.python.org](https://peps.python.org/pep-0561/).\r\n\r\n**Proposal**\r\n\r\nSince ODC Core includes inline type hints with the code, we need to follow [packaging type information - PEP561](https://peps.python.org/pep-0561/#packaging-type-information) by adding an empty `datacube/py.typed` file, and ensuring it's distributed by adding it to `package_data` in `setup.py`.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'hypothesis',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-httpserver',\n 'moto',\n]\ndoc_require = [\n 'Sphinx',\n 'sphinx_rtd_theme',\n 'sphinx_autodoc_typehints', # Propagate mypy info into docs\n 'sphinx-click',\n 'recommonmark',\n 'setuptools', # version related dependencies\n 'setuptools_scm[toml]',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'doc': doc_require,\n 's3': ['boto3', 'botocore'],\n 'test': tests_require,\n 'cf': ['compliance-checker>=4.0.0'],\n}\n\nextras_require['dev'] = sorted(set(sum([extras_require[k] for k in [\n 'test',\n 'doc',\n 'performance',\n 's3',\n 'distributed',\n]], [])))\n\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextra_plugins = dict(read=[], write=[], index=[])\n\nsetup(\n name='datacube',\n python_requires='>=3.8.0',\n\n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n maintainer='Open Data Cube',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n },\n scripts=[],\n install_requires=[\n 'affine',\n 'pyproj>=2.5',\n 'shapely>=1.6.4',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'distributed',\n 
'jsonschema',\n 'netcdf4',\n 'numpy',\n 'psycopg2',\n 'lark',\n 'pandas',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=1.3.2', # Warping broken in 1.3.0 and 1.3.1\n 'sqlalchemy',\n 'GeoAlchemy2',\n 'toolz',\n 'xarray>=0.9,!=2022.6.0', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-worker = datacube.execution.worker:main',\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n *extra_plugins['read'],\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n *extra_plugins['write'],\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.postgres.index:index_driver_init',\n 'null = datacube.index.null.index:index_driver_init',\n 'memory = datacube.index.memory.index:index_driver_init',\n 'postgis = datacube.index.postgis.index:index_driver_init',\n *extra_plugins['index'],\n ],\n },\n)\n", "path": "setup.py"}]} | 2,050 | 70 |
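
For context on the fix above: PEP 561 requires two things of a package that ships inline annotations: an empty `py.typed` marker inside the package, and packaging metadata that actually distributes it. The sketch below is a stripped-down `setup.py` showing just those pieces; it is illustrative, not the project's real setup script, and it assumes the empty `datacube/py.typed` file already exists in the source tree.

```python
# Minimal PEP 561 packaging sketch (illustrative only; mirrors the golden diff above).
# Assumption: an empty marker file `datacube/py.typed` has been committed next to the code.
from setuptools import find_packages, setup

setup(
    name="datacube",
    packages=find_packages(
        exclude=("tests", "tests.*", "integration_tests", "integration_tests.*")
    ),
    package_data={
        "": ["*.yaml", "*/*.yaml"],
        # PEP 561: ship the py.typed marker so mypy/pyright treat the installed
        # package's inline annotations as usable type information.
        "datacube": ["py.typed"],
    },
)
```

With the marker distributed, downstream projects get type checking of the package's APIs without needing separate stub packages.
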
gh_patches_debug_26282 | rasdani/github-patches | git_diff | rotki__rotki-5256 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Think of a way to keep development accounts separately
## Abstract
Between multiple development machines and between `production`/`develop` it becomes harder to keep track of which database has been used by which branch. This can lead to partially updated databases being used.
As a proposal, we could somehow separate where the `develop` accounts and the production accounts are stored so that they are not sharing the same place.
We can always copy accounts from production to develop manually (this can go to the guide).
We should also consider how this affects nightlies and how nightlies are treated. For example we might want to treat nightlies as development to avoid having users mess accidentally with their production accounts.
## Motivation
Helps better track which accounts are used in `develop`/`production`.
## Specification
- TBD
</issue>
<code>
[start of rotkehlchen/config.py]
1 import logging
2 import os
3 import platform
4 import shutil
5 from pathlib import Path
6
7 from rotkehlchen.logging import RotkehlchenLogsAdapter
8
9 logger = logging.getLogger(__name__)
10 log = RotkehlchenLogsAdapter(logger)
11
12
13 def get_xdg_data_home() -> Path:
14 directory = os.environ.get('XDG_DATA_HOME', None)
15 if directory is None:
16 home = os.path.expanduser("~")
17 directory = os.path.join(home, '.local', 'share')
18
19 return Path(directory)
20
21
22 def get_win32_appdata() -> Path:
23 directory = os.environ.get('LOCALAPPDATA', None)
24 if not directory:
25 # In windows XP there is no localappdata
26 directory = os.environ.get('APPDATA', None)
27 if not directory:
28 raise AssertionError('Could not detect an APPDATA directory')
29
30 return Path(directory)
31
32
33 def old_data_directory() -> Path:
34 home = os.path.expanduser("~")
35 directory = os.path.join(home, '.rotkehlchen')
36 return Path(directory)
37
38
39 def default_data_directory() -> Path:
40 """Find the default data directory for rotki for each different OS
41
42 An interesting lirary that finds the data directories per OS is this:
43 https://github.com/ActiveState/appdirs/blob/master/appdirs.py
44 """
45 if platform.system() == 'Linux':
46 xdgconfig = get_xdg_data_home()
47 datadir = xdgconfig / 'rotki' / 'data'
48 elif platform.system() == 'Windows':
49 appdata = get_win32_appdata()
50 datadir = appdata / 'rotki' / 'data'
51 elif platform.system() == 'Darwin':
52 datadir = Path(os.path.expanduser('~/Library/Application Support/rotki/data'))
53 else:
54 raise AssertionError(f'rotki running in unknown system: {platform.system()}')
55
56 # If old data directory exists and new does not exist copy stuff
57 old_dir = old_data_directory()
58 if old_dir.exists() and not datadir.exists():
59 log.info(f'First time using standard data directory. Copying from {old_dir} to {datadir}')
60 shutil.copytree(old_dir, datadir)
61
62 datadir.mkdir(parents=True, exist_ok=True)
63 return datadir
64
[end of rotkehlchen/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rotkehlchen/config.py b/rotkehlchen/config.py
--- a/rotkehlchen/config.py
+++ b/rotkehlchen/config.py
@@ -2,6 +2,7 @@
import os
import platform
import shutil
+import sys
from pathlib import Path
from rotkehlchen.logging import RotkehlchenLogsAdapter
@@ -42,14 +43,18 @@
An interesting lirary that finds the data directories per OS is this:
https://github.com/ActiveState/appdirs/blob/master/appdirs.py
"""
+ data_dir_name = 'data'
+ if getattr(sys, 'frozen', False) is False:
+ data_dir_name = 'develop_data'
+
if platform.system() == 'Linux':
xdgconfig = get_xdg_data_home()
- datadir = xdgconfig / 'rotki' / 'data'
+ datadir = xdgconfig / 'rotki' / data_dir_name
elif platform.system() == 'Windows':
appdata = get_win32_appdata()
- datadir = appdata / 'rotki' / 'data'
+ datadir = appdata / 'rotki' / data_dir_name
elif platform.system() == 'Darwin':
- datadir = Path(os.path.expanduser('~/Library/Application Support/rotki/data'))
+ datadir = Path(os.path.expanduser(f'~/Library/Application Support/rotki/{data_dir_name}')) # noqa: E501
else:
raise AssertionError(f'rotki running in unknown system: {platform.system()}')
| {"golden_diff": "diff --git a/rotkehlchen/config.py b/rotkehlchen/config.py\n--- a/rotkehlchen/config.py\n+++ b/rotkehlchen/config.py\n@@ -2,6 +2,7 @@\n import os\n import platform\n import shutil\n+import sys\n from pathlib import Path\n \n from rotkehlchen.logging import RotkehlchenLogsAdapter\n@@ -42,14 +43,18 @@\n An interesting lirary that finds the data directories per OS is this:\n https://github.com/ActiveState/appdirs/blob/master/appdirs.py\n \"\"\"\n+ data_dir_name = 'data'\n+ if getattr(sys, 'frozen', False) is False:\n+ data_dir_name = 'develop_data'\n+\n if platform.system() == 'Linux':\n xdgconfig = get_xdg_data_home()\n- datadir = xdgconfig / 'rotki' / 'data'\n+ datadir = xdgconfig / 'rotki' / data_dir_name\n elif platform.system() == 'Windows':\n appdata = get_win32_appdata()\n- datadir = appdata / 'rotki' / 'data'\n+ datadir = appdata / 'rotki' / data_dir_name\n elif platform.system() == 'Darwin':\n- datadir = Path(os.path.expanduser('~/Library/Application Support/rotki/data'))\n+ datadir = Path(os.path.expanduser(f'~/Library/Application Support/rotki/{data_dir_name}')) # noqa: E501\n else:\n raise AssertionError(f'rotki running in unknown system: {platform.system()}')\n", "issue": "Think of a way to keep development accounts separately \n## Abstract\r\n\r\nBetween multiple development machines and between `production`/`develop` it becomes harder to keep track of which database has been used by which branch. This can lead to partially updated databases being used.\r\n\r\nAs a proposal, we could somehow separate where the `develop` accounts and the production accounts are stored so that they are not sharing the same place.\r\n\r\nWe can always copy accounts from production to develop manually (this can go to the guide).\r\n\r\nWe should also consider how this affects nightlies and how nightlies are treated. For example we might want to treat nightlies as development to avoid having users mess accidentally with their production accounts. 
\r\n\r\n## Motivation\r\n\r\nHelps better track which accounts are used in `develop`/`production`.\r\n\r\n## Specification\r\n\r\n- TDB\r\n\n", "before_files": [{"content": "import logging\nimport os\nimport platform\nimport shutil\nfrom pathlib import Path\n\nfrom rotkehlchen.logging import RotkehlchenLogsAdapter\n\nlogger = logging.getLogger(__name__)\nlog = RotkehlchenLogsAdapter(logger)\n\n\ndef get_xdg_data_home() -> Path:\n directory = os.environ.get('XDG_DATA_HOME', None)\n if directory is None:\n home = os.path.expanduser(\"~\")\n directory = os.path.join(home, '.local', 'share')\n\n return Path(directory)\n\n\ndef get_win32_appdata() -> Path:\n directory = os.environ.get('LOCALAPPDATA', None)\n if not directory:\n # In windows XP there is no localappdata\n directory = os.environ.get('APPDATA', None)\n if not directory:\n raise AssertionError('Could not detect an APPDATA directory')\n\n return Path(directory)\n\n\ndef old_data_directory() -> Path:\n home = os.path.expanduser(\"~\")\n directory = os.path.join(home, '.rotkehlchen')\n return Path(directory)\n\n\ndef default_data_directory() -> Path:\n \"\"\"Find the default data directory for rotki for each different OS\n\n An interesting lirary that finds the data directories per OS is this:\n https://github.com/ActiveState/appdirs/blob/master/appdirs.py\n \"\"\"\n if platform.system() == 'Linux':\n xdgconfig = get_xdg_data_home()\n datadir = xdgconfig / 'rotki' / 'data'\n elif platform.system() == 'Windows':\n appdata = get_win32_appdata()\n datadir = appdata / 'rotki' / 'data'\n elif platform.system() == 'Darwin':\n datadir = Path(os.path.expanduser('~/Library/Application Support/rotki/data'))\n else:\n raise AssertionError(f'rotki running in unknown system: {platform.system()}')\n\n # If old data directory exists and new does not exist copy stuff\n old_dir = old_data_directory()\n if old_dir.exists() and not datadir.exists():\n log.info(f'First time using standard data directory. Copying from {old_dir} to {datadir}')\n shutil.copytree(old_dir, datadir)\n\n datadir.mkdir(parents=True, exist_ok=True)\n return datadir\n", "path": "rotkehlchen/config.py"}]} | 1,320 | 357 |
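
For context on the fix above: the patch keys the data directory name off `sys.frozen`, so packaged builds keep writing to `data` while source checkouts get a separate `develop_data` directory. The sketch below condenses that selection into one function; the directory names mirror the diff, and the Windows branch is simplified (the real code also falls back to `APPDATA` on old systems).

```python
# Condensed sketch of per-environment data directory selection (assumptions noted above).
import os
import platform
import sys
from pathlib import Path


def default_data_directory() -> Path:
    # Frozen bundles (PyInstaller-style) set sys.frozen; plain `python` runs do not.
    data_dir_name = 'data' if getattr(sys, 'frozen', False) else 'develop_data'
    system = platform.system()
    if system == 'Linux':
        base = Path(os.environ.get('XDG_DATA_HOME', str(Path.home() / '.local' / 'share')))
    elif system == 'Windows':
        base = Path(os.environ['LOCALAPPDATA'])  # simplified; real code falls back to APPDATA
    elif system == 'Darwin':
        base = Path.home() / 'Library' / 'Application Support'
    else:
        raise AssertionError(f'running in unknown system: {system}')
    datadir = base / 'rotki' / data_dir_name
    datadir.mkdir(parents=True, exist_ok=True)
    return datadir
```
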
gh_patches_debug_9457 | rasdani/github-patches | git_diff | pypa__setuptools-3296 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] ImportError when using `importlib` with setuptools 60.9.0+
### setuptools version
`setuptools==62.1.0`
(The repro below uses the latest setuptools version, however, the first affected version was `setuptools==60.9.0`)
### Python version
Python 3.7
### OS
Docker image `python:3.7.13` which is Debian 11 (it also occurs on Ubuntu on Heroku)
### Additional environment information
Reduced `requirements.txt`:
```
celery==5.2.2
Django==3.2.8
importlib-metadata==0.20
```
### Description
Between setuptools 60.8.2 and 60.9.0, a previously working Django project now fails when using `importlib.import_module()`:
```
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/usr/local/lib/python3.7/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1006, in _gcd_import
File "<frozen importlib._bootstrap>", line 983, in _find_and_load
File "<frozen importlib._bootstrap>", line 967, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 677, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 728, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/testcase.py", line 2, in <module>
from celery import Celery
ImportError: cannot import name 'Celery' from 'celery' (/usr/local/lib/python3.7/site-packages/celery/__init__.py)
```
In the original project (which was from a customer), the `importlib.import_module()` usage was inside gunicorn, when it loads the provided WSGI application file. However, I've removed gunicorn (and much of the rest of the project) as part of creating a reduced testcase.
The changes between those two setuptools versions are:
https://github.com/pypa/setuptools/compare/v60.8.2...v60.9.0
In addition to the workaround of downgrading setuptools to `60.8.2`, I also found that making any of the following changes (each in isolation) prevents the error from occurring:
- Upgrading Python to 3.8+
- Upgrading importlib-metadata from 0.20 to 0.21+ ([changelog](https://github.com/python/importlib_metadata/compare/0.20...0.21))
- Upgrading Django from 3.2.8 to 3.2.9+ ([changelog](https://github.com/django/django/compare/3.2.8...3.2.9), of which [this](https://github.com/django/django/commit/8bebb1c04a40a8bb45368415cd01ccbf32fd3236) seems relevant)
- Upgrading Celery from 5.2.2 to 5.2.5 ([changelog](https://github.com/celery/celery/compare/v5.2.2...v5.2.5), of which [this](https://github.com/celery/celery/pull/7218) seems relevant) (skipped 5.2.3 and 5.2.4 since they pin setuptools to an earlier version that isn't affected by this)
### Expected behavior
Either:
1. The import succeeds with newer versions of setuptools, as it did before.
2. Or, setuptools outputs a warning/error to make debugging this easier, or otherwise blocks usage with incompatible `importlib-metadata` versions.
### How to Reproduce
1. `docker run --rm -it python:3.7.13 bash`
2. `echo -e "from django.core.wsgi import get_wsgi_application\nfrom celery import Celery" > testcase.py`
3. `pip install setuptools==62.1.0`
4. `pip install celery==5.2.2 Django==3.2.8 importlib-metadata==0.20`
5. `python -c 'import importlib; importlib.import_module("testcase")'`
### Output
```console
$ docker run --rm -it python:3.7.13 bash
...
root@030ec79ad5e2:/# echo -e "from django.core.wsgi import get_wsgi_application\nfrom celery import Celery" > testcase.py
root@030ec79ad5e2:/# pip install setuptools==62.1.0
...
root@030ec79ad5e2:/# pip install celery==5.2.2 Django==3.2.8 importlib-metadata==0.20
...
Installing collected packages: wcwidth, pytz, cached-property, billiard, zipp, vine, typing-extensions, sqlparse, six, prompt-toolkit, importlib-metadata, asgiref, amqp, kombu, Django, click, click-repl, click-plugins, click-didyoumean, celery
Successfully installed Django-3.2.8 amqp-5.1.1 asgiref-3.5.0 billiard-3.6.4.0 cached-property-1.5.2 celery-5.2.2 click-8.1.2 click-didyoumean-0.3.0 click-plugins-1.1.1 click-repl-0.2.0 importlib-metadata-0.20 kombu-5.2.4 prompt-toolkit-3.0.29 pytz-2022.1 six-1.16.0 sqlparse-0.4.2 typing-extensions-4.2.0 vine-5.0.0 wcwidth-0.2.5 zipp-3.8.0
root@030ec79ad5e2:/# python -c 'import importlib; importlib.import_module("testcase")'
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/usr/local/lib/python3.7/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1006, in _gcd_import
File "<frozen importlib._bootstrap>", line 983, in _find_and_load
File "<frozen importlib._bootstrap>", line 967, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 677, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 728, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/testcase.py", line 2, in <module>
from celery import Celery
ImportError: cannot import name 'Celery' from 'celery' (/usr/local/lib/python3.7/site-packages/celery/__init__.py)
```
</issue>
<code>
[start of setuptools/_importlib.py]
1 import sys
2
3
4 def disable_importlib_metadata_finder(metadata):
5 """
6 Ensure importlib_metadata doesn't provide older, incompatible
7 Distributions.
8
9 Workaround for #3102.
10 """
11 try:
12 import importlib_metadata
13 except ImportError:
14 return
15 if importlib_metadata is metadata:
16 return
17 to_remove = [
18 ob
19 for ob in sys.meta_path
20 if isinstance(ob, importlib_metadata.MetadataPathFinder)
21 ]
22 for item in to_remove:
23 sys.meta_path.remove(item)
24
25
26 if sys.version_info < (3, 10):
27 from setuptools.extern import importlib_metadata as metadata
28 disable_importlib_metadata_finder(metadata)
29 else:
30 import importlib.metadata as metadata # noqa: F401
31
32
33 if sys.version_info < (3, 9):
34 from setuptools.extern import importlib_resources as resources
35 else:
36 import importlib.resources as resources # noqa: F401
37
[end of setuptools/_importlib.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setuptools/_importlib.py b/setuptools/_importlib.py
--- a/setuptools/_importlib.py
+++ b/setuptools/_importlib.py
@@ -12,6 +12,17 @@
import importlib_metadata
except ImportError:
return
+ except AttributeError:
+ import warnings
+
+ msg = (
+ "`importlib-metadata` version is incompatible with `setuptools`.\n"
+ "This problem is likely to be solved by installing an updated version of "
+ "`importlib-metadata`."
+ )
+ warnings.warn(msg) # Ensure a descriptive message is shown.
+ raise # This exception can be suppressed by _distutils_hack
+
if importlib_metadata is metadata:
return
to_remove = [
| {"golden_diff": "diff --git a/setuptools/_importlib.py b/setuptools/_importlib.py\n--- a/setuptools/_importlib.py\n+++ b/setuptools/_importlib.py\n@@ -12,6 +12,17 @@\n import importlib_metadata\n except ImportError:\n return\n+ except AttributeError:\n+ import warnings\n+\n+ msg = (\n+ \"`importlib-metadata` version is incompatible with `setuptools`.\\n\"\n+ \"This problem is likely to be solved by installing an updated version of \"\n+ \"`importlib-metadata`.\"\n+ )\n+ warnings.warn(msg) # Ensure a descriptive message is shown.\n+ raise # This exception can be suppressed by _distutils_hack\n+\n if importlib_metadata is metadata:\n return\n to_remove = [\n", "issue": "[BUG] ImportError when using `importlib` with setuptools 60.9.0+\n### setuptools version\r\n\r\n`setuptools==62.1.0`\r\n\r\n(The repro below uses the latest setuptools version, however, the first affected version was `setuptools==60.9.0`)\r\n\r\n### Python version\r\n\r\nPython 3.7\r\n\r\n### OS\r\n\r\nDocker image `python:3.7.13` which is Debian 11 (it also occurs on Ubuntu on Heroku)\r\n\r\n### Additional environment information\r\n\r\nReduced `requirements.txt`:\r\n\r\n```\r\ncelery==5.2.2\r\nDjango==3.2.8\r\nimportlib-metadata==0.20\r\n```\r\n\r\n### Description\r\n\r\nBetween setuptools 60.8.2 and 60.9.0, a previously working Django project now fails when using `importlib.import_module()`:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/usr/local/lib/python3.7/importlib/__init__.py\", line 127, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 1006, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 983, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 967, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 677, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 728, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/testcase.py\", line 2, in <module>\r\n from celery import Celery\r\nImportError: cannot import name 'Celery' from 'celery' (/usr/local/lib/python3.7/site-packages/celery/__init__.py)\r\n```\r\n\r\nIn the original project (which was from a customer), the `importlib.import_module()` usage was inside gunicorn, when it loads the provided WSGI application file. 
However, I've removed gunicorn (and much of the rest of the project) as part of creating a reduced testcase.\r\n\r\nThe changes between those two setuptools versions are:\r\nhttps://github.com/pypa/setuptools/compare/v60.8.2...v60.9.0\r\n\r\nIn addition to the workaround of downgrading setuptools to `60.8.2`, I also found that making any of the following changes (each in isolation) prevents the error from occurring:\r\n- Upgrading Python to 3.8+\r\n- Upgrading importlib-metadata from 0.20 to 0.21+ ([changelog](https://github.com/python/importlib_metadata/compare/0.20...0.21))\r\n- Upgrading Django from 3.2.8 to 3.2.9+ ([changelog](https://github.com/django/django/compare/3.2.8...3.2.9), of which [this](https://github.com/django/django/commit/8bebb1c04a40a8bb45368415cd01ccbf32fd3236) seems relevant)\r\n- Upgrading Celery from 5.2.2 to 5.2.5 ([changelog](https://github.com/celery/celery/compare/v5.2.2...v5.2.5), of which [this](https://github.com/celery/celery/pull/7218) seems relevant) (skipped 5.2.3 and 5.2.4 since they pin setuptools to an earlier version that isn't affected by this)\r\n\r\n### Expected behavior\r\n\r\nEither:\r\n1. The import succeed with newer versions of setuptools, as it did before.\r\n2. Or, setuptools outputs a warning/error to make debugging this easier, or otherwise blocks usage with incompatible `importlib-metadata` versions.\r\n\r\n### How to Reproduce\r\n\r\n1. `docker run --rm -it python:3.7.13 bash`\r\n2. `echo -e \"from django.core.wsgi import get_wsgi_application\\nfrom celery import Celery\" > testcase.py`\r\n3. `pip install setuptools==62.1.0`\r\n4. `pip install celery==5.2.2 Django==3.2.8 importlib-metadata==0.20`\r\n5. `python -c 'import importlib; importlib.import_module(\"testcase\")'`\r\n\r\n### Output\r\n\r\n```console\r\n$ docker run --rm -it python:3.7.13 bash\r\n...\r\nroot@030ec79ad5e2:/# echo -e \"from django.core.wsgi import get_wsgi_application\\nfrom celery import Celery\" > testcase.py\r\nroot@030ec79ad5e2:/# pip install setuptools==62.1.0\r\n...\r\nroot@030ec79ad5e2:/# pip install celery==5.2.2 Django==3.2.8 importlib-metadata==0.20\r\n...\r\nInstalling collected packages: wcwidth, pytz, cached-property, billiard, zipp, vine, typing-extensions, sqlparse, six, prompt-toolkit, importlib-metadata, asgiref, amqp, kombu, Django, click, click-repl, click-plugins, click-didyoumean, celery\r\nSuccessfully installed Django-3.2.8 amqp-5.1.1 asgiref-3.5.0 billiard-3.6.4.0 cached-property-1.5.2 celery-5.2.2 click-8.1.2 click-didyoumean-0.3.0 click-plugins-1.1.1 click-repl-0.2.0 importlib-metadata-0.20 kombu-5.2.4 prompt-toolkit-3.0.29 pytz-2022.1 six-1.16.0 sqlparse-0.4.2 typing-extensions-4.2.0 vine-5.0.0 wcwidth-0.2.5 zipp-3.8.0\r\n\r\nroot@030ec79ad5e2:/# python -c 'import importlib; importlib.import_module(\"testcase\")'\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/usr/local/lib/python3.7/importlib/__init__.py\", line 127, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 1006, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 983, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 967, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 677, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 728, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/testcase.py\", line 2, in <module>\r\n 
from celery import Celery\r\nImportError: cannot import name 'Celery' from 'celery' (/usr/local/lib/python3.7/site-packages/celery/__init__.py)\r\n```\n", "before_files": [{"content": "import sys\n\n\ndef disable_importlib_metadata_finder(metadata):\n \"\"\"\n Ensure importlib_metadata doesn't provide older, incompatible\n Distributions.\n\n Workaround for #3102.\n \"\"\"\n try:\n import importlib_metadata\n except ImportError:\n return\n if importlib_metadata is metadata:\n return\n to_remove = [\n ob\n for ob in sys.meta_path\n if isinstance(ob, importlib_metadata.MetadataPathFinder)\n ]\n for item in to_remove:\n sys.meta_path.remove(item)\n\n\nif sys.version_info < (3, 10):\n from setuptools.extern import importlib_metadata as metadata\n disable_importlib_metadata_finder(metadata)\nelse:\n import importlib.metadata as metadata # noqa: F401\n\n\nif sys.version_info < (3, 9):\n from setuptools.extern import importlib_resources as resources\nelse:\n import importlib.resources as resources # noqa: F401\n", "path": "setuptools/_importlib.py"}]} | 2,463 | 173 |
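
For context on the fix above: very old `importlib_metadata` releases (such as 0.20 from the bug report) can raise `AttributeError` while setuptools imports them, and the original bare `except ImportError` let that failure resurface later as a confusing `ImportError` in user code. The condensed sketch below restates the patched function; `metadata` is assumed to be the module setuptools vendors, as in the file shown.

```python
# Condensed sketch of the guard added in the golden diff above (illustrative).
import sys
import warnings


def disable_importlib_metadata_finder(metadata):
    """Remove metadata finders installed by a standalone importlib_metadata."""
    try:
        import importlib_metadata
    except ImportError:
        return
    except AttributeError:
        # Very old importlib_metadata fails while being imported; point the user
        # at the real culprit instead of letting a later import fail mysteriously.
        warnings.warn(
            "`importlib-metadata` version is incompatible with `setuptools`; "
            "installing an updated importlib-metadata is likely to fix this."
        )
        raise  # callers such as _distutils_hack may choose to suppress this
    if importlib_metadata is metadata:
        return
    for finder in list(sys.meta_path):
        if isinstance(finder, importlib_metadata.MetadataPathFinder):
            sys.meta_path.remove(finder)
```
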
gh_patches_debug_12204 | rasdani/github-patches | git_diff | conda__conda-5273 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
conda env export under python2 is ug
```
$ python2 -m conda_env export -p /conda
name: null
channels:
- !!python/unicode
'file:///Users/kfranz/.conda/conda-bld'
- !!python/unicode
'file:///conda/conda-bld'
- !!python/unicode
'bkreider'
- !!python/unicode
'conda-canary'
- !!python/unicode
'conda-forge'
- !!python/unicode
'defaults'
dependencies:
- !!python/unicode
'wget=1.15=2'
- !!python/unicode
'conda=4.3.0=py27_0'
- !!python/unicode
'conda-env=2.6.0=0'
- !!python/unicode
'filelock=2.0.6=py27_0'
- !!python/unicode
'boltons=16.3.1=py27_0'
- !!python/unicode
'ca-certificates=2016.8.31=0'
- !!python/unicode
'certifi=2016.8.31=py27_0'
- !!python/unicode
'functools32=3.2.3.2=py27_1'
...
```
</issue>
<code>
[start of conda_env/yaml.py]
1 """
2 Wrapper around yaml to ensure that everything is ordered correctly.
3
4 This is based on the answer at http://stackoverflow.com/a/16782282
5 """
6 from __future__ import absolute_import, print_function
7 from collections import OrderedDict
8
9 from conda.common.yaml import get_yaml
10 yaml = get_yaml()
11
12
13 def represent_ordereddict(dumper, data):
14 value = []
15
16 for item_key, item_value in data.items():
17 node_key = dumper.represent_data(item_key)
18 node_value = dumper.represent_data(item_value)
19
20 value.append((node_key, node_value))
21
22 return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)
23
24
25 yaml.add_representer(OrderedDict, represent_ordereddict)
26
27 dump = yaml.dump
28 load = yaml.load
29 dict = OrderedDict
30
[end of conda_env/yaml.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda_env/yaml.py b/conda_env/yaml.py
--- a/conda_env/yaml.py
+++ b/conda_env/yaml.py
@@ -6,6 +6,7 @@
from __future__ import absolute_import, print_function
from collections import OrderedDict
+from conda.common.compat import PY2
from conda.common.yaml import get_yaml
yaml = get_yaml()
@@ -24,6 +25,12 @@
yaml.add_representer(OrderedDict, represent_ordereddict)
+if PY2:
+ def represent_unicode(self, data):
+ return self.represent_str(data.encode('utf-8'))
+
+ yaml.add_representer(unicode, represent_unicode) # NOQA
+
dump = yaml.dump
load = yaml.load
dict = OrderedDict
| {"golden_diff": "diff --git a/conda_env/yaml.py b/conda_env/yaml.py\n--- a/conda_env/yaml.py\n+++ b/conda_env/yaml.py\n@@ -6,6 +6,7 @@\n from __future__ import absolute_import, print_function\n from collections import OrderedDict\n \n+from conda.common.compat import PY2\n from conda.common.yaml import get_yaml\n yaml = get_yaml()\n \n@@ -24,6 +25,12 @@\n \n yaml.add_representer(OrderedDict, represent_ordereddict)\n \n+if PY2:\n+ def represent_unicode(self, data):\n+ return self.represent_str(data.encode('utf-8'))\n+\n+ yaml.add_representer(unicode, represent_unicode) # NOQA\n+\n dump = yaml.dump\n load = yaml.load\n dict = OrderedDict\n", "issue": "conda env export under python2 is ug\n```\r\n$ python2 -m conda_env export -p /conda\r\nname: null\r\nchannels:\r\n- !!python/unicode\r\n 'file:///Users/kfranz/.conda/conda-bld'\r\n- !!python/unicode\r\n 'file:///conda/conda-bld'\r\n- !!python/unicode\r\n 'bkreider'\r\n- !!python/unicode\r\n 'conda-canary'\r\n- !!python/unicode\r\n 'conda-forge'\r\n- !!python/unicode\r\n 'defaults'\r\ndependencies:\r\n- !!python/unicode\r\n 'wget=1.15=2'\r\n- !!python/unicode\r\n 'conda=4.3.0=py27_0'\r\n- !!python/unicode\r\n 'conda-env=2.6.0=0'\r\n- !!python/unicode\r\n 'filelock=2.0.6=py27_0'\r\n- !!python/unicode\r\n 'boltons=16.3.1=py27_0'\r\n- !!python/unicode\r\n 'ca-certificates=2016.8.31=0'\r\n- !!python/unicode\r\n 'certifi=2016.8.31=py27_0'\r\n- !!python/unicode\r\n 'functools32=3.2.3.2=py27_1'\r\n...\r\n```\n", "before_files": [{"content": "\"\"\"\nWrapper around yaml to ensure that everything is ordered correctly.\n\nThis is based on the answer at http://stackoverflow.com/a/16782282\n\"\"\"\nfrom __future__ import absolute_import, print_function\nfrom collections import OrderedDict\n\nfrom conda.common.yaml import get_yaml\nyaml = get_yaml()\n\n\ndef represent_ordereddict(dumper, data):\n value = []\n\n for item_key, item_value in data.items():\n node_key = dumper.represent_data(item_key)\n node_value = dumper.represent_data(item_value)\n\n value.append((node_key, node_value))\n\n return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)\n\n\nyaml.add_representer(OrderedDict, represent_ordereddict)\n\ndump = yaml.dump\nload = yaml.load\ndict = OrderedDict\n", "path": "conda_env/yaml.py"}]} | 1,073 | 177 |
gh_patches_debug_11179 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2213 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add reporting template for Plan Finland
We should add the reporting template that @stellanl and @Geerts are working on to the "My reports" section. Preferably so that only superusers / admins / Plan Finland employees can see this, but we might need a little hack for that.
</issue>
<code>
[start of akvo/rsr/reports.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from django.utils.translation import ugettext_lazy as _
8
9 # Data for all available reports from ReportServer, with the following fields:
10 # - key: A unique identifier for the report
11 # - title: The title of the report, will be shown on the 'My reports' page as such
12 # - description: The description of the report, as displayed on the 'My reports' page
13 # - formats: The available formats for the report, see options below
14 # - parameters: The available parameters for the report, options; ['project', 'organisation']
15 # - url: The URL where the report is available. Parameter(s) should be indicated in between {..}'s.
16
17 REPORTS = [
18 {
19 'key': 'results-framework',
20 'title': unicode(_('Results and indicators overview')),
21 'description': unicode(_('This report gives an overview of the status of your project\'s '
22 'results and indicators.')),
23 'formats': ['pdf',],
24 'parameters': ['project', ],
25 'url': '/en/reports/project_results/{project}?format={format}&download=true'
26 },
27 {
28 'key': 'results-simple-table',
29 'title': unicode(_('Results and indicators table')),
30 'description': unicode(_('This report provides a view of your project\'s results and '
31 'indicators data in a table.')),
32 'formats': ['excel',],
33 'parameters': ['project', ],
34 'url': '/en/reports/project_results_simple_table/{project}?format={format}&download=true'
35 },
36 {
37 'key': 'projects-overview',
38 'title': unicode(_('Projects overview')),
39 'description': unicode(_('This report provides information about your organisation\'s '
40 'projects: amount of updates, country, total budgets, project '
41 'statuses, start- and end dates.')),
42 'formats': ['pdf', 'excel'],
43 'parameters': ['organisation', ],
44 'url': '/en/reports/project_overview/{organisation}?format={format}&download=true'
45 },
46 {
47 'key': 'data-quality',
48 'title': unicode(_('Data quality overview')),
49 'description': unicode(_('This report gives an overview of your organisation\'s projects '
50 'that have passed the planned end date, need funding or that '
51 'haven\'t been edited or updated for 3 months.')),
52 'formats': ['pdf', 'excel'],
53 'parameters': ['organisation', ],
54 'url': '/en/reports/data_quality/{organisation}?format={format}&download=true'
55 }
56 ]
57
58 # Data for all available formats from ReportServer, with the following fields:
59 # - key: A unique identifier for the format, also used in the formats field of the reports
60 # - displayName: The display name of the format, as displayed on the 'My reports' page
61 # - icon: The font awesome icon of the format, as displayed on the 'My reports' page
62
63 FORMATS = [
64 {
65 'key': 'pdf',
66 'displayName': 'PDF',
67 'icon': 'file-pdf-o',
68 },
69 {
70 'key': 'excel',
71 'displayName': 'Excel',
72 'icon': 'file-excel-o',
73 },
74 {
75 'key': 'word',
76 'displayName': 'Word',
77 'icon': 'file-word-o',
78 },
79 {
80 'key': 'html',
81 'displayName': 'HTML',
82 'icon': 'code',
83 },
84 ]
85
[end of akvo/rsr/reports.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rsr/reports.py b/akvo/rsr/reports.py
--- a/akvo/rsr/reports.py
+++ b/akvo/rsr/reports.py
@@ -52,6 +52,15 @@
'formats': ['pdf', 'excel'],
'parameters': ['organisation', ],
'url': '/en/reports/data_quality/{organisation}?format={format}&download=true'
+ },
+ {
+ 'key': 'plan-finland',
+ 'title': unicode(_('Plan Finland report')),
+ 'description': unicode(_('This custom MFA report for Plan Finland gives an overview of the '
+ 'hierarchy of Plan Finland\'s projects and their results.')),
+ 'formats': ['pdf', ],
+ 'parameters': ['project', ],
+ 'url': '/en/reports/plan_finland/{project}?format={format}&download=true'
}
]
| {"golden_diff": "diff --git a/akvo/rsr/reports.py b/akvo/rsr/reports.py\n--- a/akvo/rsr/reports.py\n+++ b/akvo/rsr/reports.py\n@@ -52,6 +52,15 @@\n 'formats': ['pdf', 'excel'],\n 'parameters': ['organisation', ],\n 'url': '/en/reports/data_quality/{organisation}?format={format}&download=true'\n+ },\n+ {\n+ 'key': 'plan-finland',\n+ 'title': unicode(_('Plan Finland report')),\n+ 'description': unicode(_('This custom MFA report for Plan Finland gives an overview of the '\n+ 'hierarchy of Plan Finland\\'s projects and their results.')),\n+ 'formats': ['pdf', ],\n+ 'parameters': ['project', ],\n+ 'url': '/en/reports/plan_finland/{project}?format={format}&download=true'\n }\n ]\n", "issue": "Add reporting template for Plan Finland\nWe should add the reporting template that @stellanl and @Geerts are working on to the \"My reports\" section. Preferably so that only superusers / admins / Plan Finland employees can see this, but we might need a little hack for that.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.utils.translation import ugettext_lazy as _\n\n# Data for all available reports from ReportServer, with the following fields:\n# - key: A unique identifier for the report\n# - title: The title of the report, will be shown on the 'My reports' page as such\n# - description: The description of the report, as displayed on the 'My reports' page\n# - formats: The available formats for the report, see options below\n# - parameters: The available parameters for the report, options; ['project', 'organisation']\n# - url: The URL where the report is available. 
Parameter(s) should be indicated in between {..}'s.\n\nREPORTS = [\n {\n 'key': 'results-framework',\n 'title': unicode(_('Results and indicators overview')),\n 'description': unicode(_('This report gives an overview of the status of your project\\'s '\n 'results and indicators.')),\n 'formats': ['pdf',],\n 'parameters': ['project', ],\n 'url': '/en/reports/project_results/{project}?format={format}&download=true'\n },\n {\n 'key': 'results-simple-table',\n 'title': unicode(_('Results and indicators table')),\n 'description': unicode(_('This report provides a view of your project\\'s results and '\n 'indicators data in a table.')),\n 'formats': ['excel',],\n 'parameters': ['project', ],\n 'url': '/en/reports/project_results_simple_table/{project}?format={format}&download=true'\n },\n {\n 'key': 'projects-overview',\n 'title': unicode(_('Projects overview')),\n 'description': unicode(_('This report provides information about your organisation\\'s '\n 'projects: amount of updates, country, total budgets, project '\n 'statuses, start- and end dates.')),\n 'formats': ['pdf', 'excel'],\n 'parameters': ['organisation', ],\n 'url': '/en/reports/project_overview/{organisation}?format={format}&download=true'\n },\n {\n 'key': 'data-quality',\n 'title': unicode(_('Data quality overview')),\n 'description': unicode(_('This report gives an overview of your organisation\\'s projects '\n 'that have passed the planned end date, need funding or that '\n 'haven\\'t been edited or updated for 3 months.')),\n 'formats': ['pdf', 'excel'],\n 'parameters': ['organisation', ],\n 'url': '/en/reports/data_quality/{organisation}?format={format}&download=true'\n }\n]\n\n# Data for all available formats from ReportServer, with the following fields:\n# - key: A unique identifier for the format, also used in the formats field of the reports\n# - displayName: The display name of the format, as displayed on the 'My reports' page\n# - icon: The font awesome icon of the format, as displayed on the 'My reports' page\n\nFORMATS = [\n {\n 'key': 'pdf',\n 'displayName': 'PDF',\n 'icon': 'file-pdf-o',\n },\n {\n 'key': 'excel',\n 'displayName': 'Excel',\n 'icon': 'file-excel-o',\n },\n {\n 'key': 'word',\n 'displayName': 'Word',\n 'icon': 'file-word-o',\n },\n {\n 'key': 'html',\n 'displayName': 'HTML',\n 'icon': 'code',\n },\n]\n", "path": "akvo/rsr/reports.py"}]} | 1,555 | 203 |
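
For context on the fix above: the golden diff is a pure data change, appending one dictionary to `REPORTS`. The visibility restriction the issue asks for (superusers, admins, Plan Finland staff) is not part of the diff; the `visible_to` field and filter helper below are hypothetical, sketching one way such a restriction could be layered on top of the same data.

```python
# Hypothetical visibility filter on top of the REPORTS data (not in the golden diff).
PLAN_FINLAND_REPORT = {
    'key': 'plan-finland',
    'title': 'Plan Finland report',
    'description': 'Custom MFA report showing the hierarchy of Plan Finland projects and results.',
    'formats': ['pdf'],
    'parameters': ['project'],
    'url': '/en/reports/plan_finland/{project}?format={format}&download=true',
    'visible_to': {'superusers', 'plan-finland'},  # hypothetical extra field
}


def reports_for(user_groups, reports):
    """Return the reports a user may see; entries without `visible_to` stay public."""
    groups = set(user_groups)
    return [
        report for report in reports
        if not report.get('visible_to') or report['visible_to'] & groups
    ]
```
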
gh_patches_debug_26692 | rasdani/github-patches | git_diff | google__fuzzbench-291 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[GCP] Runners are not started.
I pulled master and tried to evaluate libfuzzer against honggfuzz in 5 trials for 1 hour on 1 benchmark (mbedtls_fuzz_dtlsclient). It doesn't generate the report anymore. The web bucket is empty, the experiments-result folder does not exist in the data bucket, the SQL database is empty, and the Error Reporting gives the following error:
```
ValueError: Empty experiment data. Message: Error generating HTML report.
at validate_data (/work/src/analysis/data_utils.py:21)
at generate_report (/work/src/analysis/generate_report.py:132)
at output_report (/work/src/experiment/reporter.py:43)
```
I deleted authorization keys of the service account. I deleted the old and set up a new SQL database (incl. `alembic upgrade head`). I cleaned out the container registry (by deleting the `container` folder in the corresponding bucket). I cleaned out the Cloud Builds (by deleting `source` folder in the corresponding bucket). It recreates the containers and builds, when I start the dispatcher. The dispatcher runs properly. I SSH'ed into a random runner: `docker images` and `docker ps -a` return empty-handed. Is the recent setup gcr.io/fuzzbench-specific? Any suggestion to debug?
</issue>
<code>
[start of common/benchmark_utils.py]
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Code for dealing with benchmarks."""
15 import os
16 import re
17
18 from common import experiment_utils
19 from common import fuzzer_utils
20 from common import logs
21 from common import oss_fuzz
22 from common import utils
23
24 VALID_BENCHMARK_REGEX = re.compile(r'^[A-Za-z0-9\._\-]+$')
25
26
27 def is_oss_fuzz(benchmark):
28 """Returns True if |benchmark| is OSS-Fuzz-based project."""
29 return os.path.isfile(oss_fuzz.get_config_file(benchmark))
30
31
32 def get_project(benchmark):
33 """Returns the OSS-Fuzz project of |benchmark| if it is based on an
34 OSS-Fuzz project, otherwise raises ValueError."""
35 if is_oss_fuzz(benchmark):
36 return oss_fuzz.get_config(benchmark)['project']
37 raise ValueError('Can only get project on OSS-Fuzz benchmarks.')
38
39
40 def get_fuzz_target(benchmark):
41 """Returns the fuzz target of |benchmark|"""
42 if is_oss_fuzz(benchmark):
43 return oss_fuzz.get_config(benchmark)['fuzz_target']
44 return fuzzer_utils.DEFAULT_FUZZ_TARGET_NAME
45
46
47 def get_runner_image_url(benchmark, fuzzer, cloud_project):
48 """Get the URL of the docker runner image for fuzzing the benchmark with
49 fuzzer."""
50 base_tag = experiment_utils.get_base_docker_tag(cloud_project)
51 if is_oss_fuzz(benchmark):
52 return '{base_tag}/oss-fuzz/runners/{fuzzer}/{project}'.format(
53 base_tag=base_tag, fuzzer=fuzzer, project=get_project(benchmark))
54 return '{base_tag}/runners/{fuzzer}/{benchmark}'.format(base_tag=base_tag,
55 fuzzer=fuzzer,
56 benchmark=benchmark)
57
58
59 def get_builder_image_url(benchmark, fuzzer, cloud_project):
60 """Get the URL of the docker builder image for fuzzing the benchmark with
61 fuzzer."""
62 base_tag = experiment_utils.get_base_docker_tag(cloud_project)
63 if is_oss_fuzz(benchmark):
64 return '{base_tag}/oss-fuzz/builders/{fuzzer}/{project}'.format(
65 base_tag=base_tag, fuzzer=fuzzer, project=get_project(benchmark))
66 return '{base_tag}/builders/{fuzzer}/{benchmark}'.format(
67 base_tag=base_tag, fuzzer=fuzzer, benchmark=benchmark)
68
69
70 def get_oss_fuzz_builder_hash(benchmark):
71 """Get the specified hash of the OSS-Fuzz builder for the OSS-Fuzz project
72 used by |benchmark|."""
73 if is_oss_fuzz(benchmark):
74 return oss_fuzz.get_config(benchmark)['oss_fuzz_builder_hash']
75 raise ValueError('Can only get project on OSS-Fuzz benchmarks.')
76
77
78 def validate(benchmark):
79 """Return True if |benchmark| is a valid fuzzbench fuzzer."""
80 if VALID_BENCHMARK_REGEX.match(benchmark) is None:
81 logs.error('%s does not conform to %s pattern.', benchmark,
82 VALID_BENCHMARK_REGEX.pattern)
83 return False
84 if benchmark in get_all_benchmarks():
85 return True
86 logs.error('%s must have a build.sh or oss-fuzz.yaml.', benchmark)
87 return False
88
89
90 def get_all_benchmarks():
91 """Returns the list of all benchmarks."""
92 benchmarks_dir = os.path.join(utils.ROOT_DIR, 'benchmarks')
93 all_benchmarks = []
94 for benchmark in os.listdir(benchmarks_dir):
95 benchmark_path = os.path.join(benchmarks_dir, benchmark)
96 if os.path.isfile(os.path.join(benchmark_path, 'oss-fuzz.yaml')):
97 # Benchmark is an OSS-Fuzz benchmark.
98 all_benchmarks.append(benchmark)
99 elif os.path.isfile(os.path.join(benchmark_path, 'build.sh')):
100 # Benchmark is a standard benchmark.
101 all_benchmarks.append(benchmark)
102 return all_benchmarks
103
[end of common/benchmark_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/common/benchmark_utils.py b/common/benchmark_utils.py
--- a/common/benchmark_utils.py
+++ b/common/benchmark_utils.py
@@ -48,9 +48,6 @@
"""Get the URL of the docker runner image for fuzzing the benchmark with
fuzzer."""
base_tag = experiment_utils.get_base_docker_tag(cloud_project)
- if is_oss_fuzz(benchmark):
- return '{base_tag}/oss-fuzz/runners/{fuzzer}/{project}'.format(
- base_tag=base_tag, fuzzer=fuzzer, project=get_project(benchmark))
return '{base_tag}/runners/{fuzzer}/{benchmark}'.format(base_tag=base_tag,
fuzzer=fuzzer,
benchmark=benchmark)
@@ -60,9 +57,6 @@
"""Get the URL of the docker builder image for fuzzing the benchmark with
fuzzer."""
base_tag = experiment_utils.get_base_docker_tag(cloud_project)
- if is_oss_fuzz(benchmark):
- return '{base_tag}/oss-fuzz/builders/{fuzzer}/{project}'.format(
- base_tag=base_tag, fuzzer=fuzzer, project=get_project(benchmark))
return '{base_tag}/builders/{fuzzer}/{benchmark}'.format(
base_tag=base_tag, fuzzer=fuzzer, benchmark=benchmark)
| {"golden_diff": "diff --git a/common/benchmark_utils.py b/common/benchmark_utils.py\n--- a/common/benchmark_utils.py\n+++ b/common/benchmark_utils.py\n@@ -48,9 +48,6 @@\n \"\"\"Get the URL of the docker runner image for fuzzing the benchmark with\n fuzzer.\"\"\"\n base_tag = experiment_utils.get_base_docker_tag(cloud_project)\n- if is_oss_fuzz(benchmark):\n- return '{base_tag}/oss-fuzz/runners/{fuzzer}/{project}'.format(\n- base_tag=base_tag, fuzzer=fuzzer, project=get_project(benchmark))\n return '{base_tag}/runners/{fuzzer}/{benchmark}'.format(base_tag=base_tag,\n fuzzer=fuzzer,\n benchmark=benchmark)\n@@ -60,9 +57,6 @@\n \"\"\"Get the URL of the docker builder image for fuzzing the benchmark with\n fuzzer.\"\"\"\n base_tag = experiment_utils.get_base_docker_tag(cloud_project)\n- if is_oss_fuzz(benchmark):\n- return '{base_tag}/oss-fuzz/builders/{fuzzer}/{project}'.format(\n- base_tag=base_tag, fuzzer=fuzzer, project=get_project(benchmark))\n return '{base_tag}/builders/{fuzzer}/{benchmark}'.format(\n base_tag=base_tag, fuzzer=fuzzer, benchmark=benchmark)\n", "issue": "[GCP] Runners are not started.\nI pulled master and tried to evaluate libfuzzer against honggfuzz in 5 trials for 1 hour on 1 benchmark (mbedtls_fuzz_dtlsclient). It doesn't generate the report anymore. The web bucket is empty, the experiments-result folder does not exist in the data bucket, the SQL database is empty, and the Error Reporting gives the following error:\r\n```\r\nValueError: Empty experiment data. Message: Error generating HTML report.\r\nat validate_data (/work/src/analysis/data_utils.py:21)\r\nat generate_report (/work/src/analysis/generate_report.py:132)\r\nat output_report (/work/src/experiment/reporter.py:43)\r\n```\r\n\r\nI deleted authorization keys of the service account. I deleted the old and set up a new SQL database (incl. `alembic upgrade head`). I cleaned out the container registry (by deleting the `container` folder in the corresponding bucket). I cleaned out the Cloud Builds (by deleting `source` folder in the corresponding bucket). It recreates the containers and builds, when I start the dispatcher. The dispatcher runs properly. I SSH'ed into a random runner: `docker images` and `docker ps -a` return empty-handed. Is the recent setup gcr.io/fuzzbench-specific? 
Any suggestion to debug?\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Code for dealing with benchmarks.\"\"\"\nimport os\nimport re\n\nfrom common import experiment_utils\nfrom common import fuzzer_utils\nfrom common import logs\nfrom common import oss_fuzz\nfrom common import utils\n\nVALID_BENCHMARK_REGEX = re.compile(r'^[A-Za-z0-9\\._\\-]+$')\n\n\ndef is_oss_fuzz(benchmark):\n \"\"\"Returns True if |benchmark| is OSS-Fuzz-based project.\"\"\"\n return os.path.isfile(oss_fuzz.get_config_file(benchmark))\n\n\ndef get_project(benchmark):\n \"\"\"Returns the OSS-Fuzz project of |benchmark| if it is based on an\n OSS-Fuzz project, otherwise raises ValueError.\"\"\"\n if is_oss_fuzz(benchmark):\n return oss_fuzz.get_config(benchmark)['project']\n raise ValueError('Can only get project on OSS-Fuzz benchmarks.')\n\n\ndef get_fuzz_target(benchmark):\n \"\"\"Returns the fuzz target of |benchmark|\"\"\"\n if is_oss_fuzz(benchmark):\n return oss_fuzz.get_config(benchmark)['fuzz_target']\n return fuzzer_utils.DEFAULT_FUZZ_TARGET_NAME\n\n\ndef get_runner_image_url(benchmark, fuzzer, cloud_project):\n \"\"\"Get the URL of the docker runner image for fuzzing the benchmark with\n fuzzer.\"\"\"\n base_tag = experiment_utils.get_base_docker_tag(cloud_project)\n if is_oss_fuzz(benchmark):\n return '{base_tag}/oss-fuzz/runners/{fuzzer}/{project}'.format(\n base_tag=base_tag, fuzzer=fuzzer, project=get_project(benchmark))\n return '{base_tag}/runners/{fuzzer}/{benchmark}'.format(base_tag=base_tag,\n fuzzer=fuzzer,\n benchmark=benchmark)\n\n\ndef get_builder_image_url(benchmark, fuzzer, cloud_project):\n \"\"\"Get the URL of the docker builder image for fuzzing the benchmark with\n fuzzer.\"\"\"\n base_tag = experiment_utils.get_base_docker_tag(cloud_project)\n if is_oss_fuzz(benchmark):\n return '{base_tag}/oss-fuzz/builders/{fuzzer}/{project}'.format(\n base_tag=base_tag, fuzzer=fuzzer, project=get_project(benchmark))\n return '{base_tag}/builders/{fuzzer}/{benchmark}'.format(\n base_tag=base_tag, fuzzer=fuzzer, benchmark=benchmark)\n\n\ndef get_oss_fuzz_builder_hash(benchmark):\n \"\"\"Get the specified hash of the OSS-Fuzz builder for the OSS-Fuzz project\n used by |benchmark|.\"\"\"\n if is_oss_fuzz(benchmark):\n return oss_fuzz.get_config(benchmark)['oss_fuzz_builder_hash']\n raise ValueError('Can only get project on OSS-Fuzz benchmarks.')\n\n\ndef validate(benchmark):\n \"\"\"Return True if |benchmark| is a valid fuzzbench fuzzer.\"\"\"\n if VALID_BENCHMARK_REGEX.match(benchmark) is None:\n logs.error('%s does not conform to %s pattern.', benchmark,\n VALID_BENCHMARK_REGEX.pattern)\n return False\n if benchmark in get_all_benchmarks():\n return True\n logs.error('%s must have a build.sh or oss-fuzz.yaml.', benchmark)\n return False\n\n\ndef get_all_benchmarks():\n \"\"\"Returns the list of all benchmarks.\"\"\"\n benchmarks_dir = os.path.join(utils.ROOT_DIR, 'benchmarks')\n all_benchmarks = []\n for benchmark in 
os.listdir(benchmarks_dir):\n benchmark_path = os.path.join(benchmarks_dir, benchmark)\n if os.path.isfile(os.path.join(benchmark_path, 'oss-fuzz.yaml')):\n # Benchmark is an OSS-Fuzz benchmark.\n all_benchmarks.append(benchmark)\n elif os.path.isfile(os.path.join(benchmark_path, 'build.sh')):\n # Benchmark is a standard benchmark.\n all_benchmarks.append(benchmark)\n return all_benchmarks\n", "path": "common/benchmark_utils.py"}]} | 1,962 | 291 |
gh_patches_debug_9883 | rasdani/github-patches | git_diff | sublimelsp__LSP-2408 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Goto Definition throws
**Describe the bug**
Using `LSP-pyright`, click `Definition`. Nothing will happen visually.
**To Reproduce**
1. `LSP-pyright`
2. hover over something
3. click `Definition` (or `Type Definition` or `Declaration`)
4. see error in console:
```py
Traceback (most recent call last):
File "D:\Programs\Sublime Text 4\Data\Installed Packages\LSP.sublime-package\plugin/core/sessions.py", line 1681, in <lambda>
File "D:\Programs\Sublime Text 4\Data\Installed Packages\LSP.sublime-package\plugin/core/open.py", line 96, in open_file
File "D:\Programs\Sublime Text 4\Data\Installed Packages\LSP.sublime-package\plugin/core/open.py", line 82, in _find_open_file
TypeError: find_open_file() takes 2 positional arguments but 3 were given
```
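
The traceback suggests that this Sublime Text build's `Window.find_open_file` only accepts the file name, while the plugin also passes a `group` argument. Below is a minimal sketch of a version-guarded wrapper; the helper name and the build number `4136` used as the cutoff are assumptions for illustration, not something stated in this report.

```py
import sublime

def find_open_file_compat(window, fname, group=-1):
    # Hypothetical wrapper: newer builds accept (fname, group); older builds
    # such as 4121 only accept (fname). The 4136 cutoff is an assumption.
    if int(sublime.version()) >= 4136:
        return window.find_open_file(fname, group)
    return window.find_open_file(fname)
```

Used in place of the direct `window.find_open_file(fname, group)` call, a wrapper like this would avoid the `TypeError` on older builds while keeping the group-aware behaviour where it is available.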
**Expected behavior**
no error, I go to definition
**Environment (please complete the following information):**
- OS: windows
- Sublime Text version: 4121
- LSP version: 1.28.0
- Language servers used: pyright-1.2.42
</issue>
<code>
[start of plugin/core/open.py]
1 from .logging import exception_log
2 from .promise import Promise
3 from .promise import ResolveFunc
4 from .protocol import DocumentUri
5 from .protocol import Range
6 from .protocol import UINT_MAX
7 from .typing import Dict, Tuple, Optional
8 from .typing import cast
9 from .url import parse_uri
10 from .views import range_to_region
11 from urllib.parse import unquote, urlparse
12 import os
13 import re
14 import sublime
15 import sublime_plugin
16 import subprocess
17 import webbrowser
18
19
20 opening_files = {} # type: Dict[str, Tuple[Promise[Optional[sublime.View]], ResolveFunc[Optional[sublime.View]]]]
21 FRAGMENT_PATTERN = re.compile(r'^L?(\d+)(?:,(\d+))?(?:-L?(\d+)(?:,(\d+))?)?')
22
23
24 def lsp_range_from_uri_fragment(fragment: str) -> Optional[Range]:
25 match = FRAGMENT_PATTERN.match(fragment)
26 if match:
27 selection = {'start': {'line': 0, 'character': 0}, 'end': {'line': 0, 'character': 0}} # type: Range
28 # Line and column numbers in the fragment are assumed to be 1-based and need to be converted to 0-based
29 # numbers for the LSP Position structure.
30 start_line, start_column, end_line, end_column = [max(0, int(g) - 1) if g else None for g in match.groups()]
31 if start_line:
32 selection['start']['line'] = start_line
33 selection['end']['line'] = start_line
34 if start_column:
35 selection['start']['character'] = start_column
36 selection['end']['character'] = start_column
37 if end_line:
38 selection['end']['line'] = end_line
39 selection['end']['character'] = UINT_MAX
40 if end_column is not None:
41 selection['end']['character'] = end_column
42 return selection
43 return None
44
45
46 def open_file_uri(
47 window: sublime.Window, uri: DocumentUri, flags: int = 0, group: int = -1
48 ) -> Promise[Optional[sublime.View]]:
49
50 decoded_uri = unquote(uri) # decode percent-encoded characters
51 parsed = urlparse(decoded_uri)
52 open_promise = open_file(window, decoded_uri, flags, group)
53 if parsed.fragment:
54 selection = lsp_range_from_uri_fragment(parsed.fragment)
55 if selection:
56 return open_promise.then(lambda view: _select_and_center(view, cast(Range, selection)))
57 return open_promise
58
59
60 def _select_and_center(view: Optional[sublime.View], r: Range) -> Optional[sublime.View]:
61 if view:
62 return center_selection(view, r)
63 return None
64
65
66 def _return_existing_view(flags: int, existing_view_group: int, active_group: int, specified_group: int) -> bool:
67 if specified_group > -1:
68 return existing_view_group == specified_group
69 if bool(flags & (sublime.ADD_TO_SELECTION | sublime.REPLACE_MRU)):
70 return False
71 if existing_view_group == active_group:
72 return True
73 return not bool(flags & sublime.FORCE_GROUP)
74
75
76 def _find_open_file(window: sublime.Window, fname: str, group: int = -1) -> Optional[sublime.View]:
77 """A replacement for Window.find_open_file that prefers the active view instead of the leftmost one."""
78 _group = window.active_group() if group == -1 else group
79 view = window.active_view_in_group(_group)
80 if view and fname == view.file_name():
81 return view
82 return window.find_open_file(fname, group)
83
84
85 def open_file(
86 window: sublime.Window, uri: DocumentUri, flags: int = 0, group: int = -1
87 ) -> Promise[Optional[sublime.View]]:
88 """
89 Open a file asynchronously.
90 It is only safe to call this function from the UI thread.
91 The provided uri MUST be a file URI
92 """
93 file = parse_uri(uri)[1]
94 # window.open_file brings the file to focus if it's already opened, which we don't want (unless it's supposed
95 # to open as a separate view).
96 view = _find_open_file(window, file)
97 if view and _return_existing_view(flags, window.get_view_index(view)[0], window.active_group(), group):
98 return Promise.resolve(view)
99
100 was_already_open = view is not None
101 view = window.open_file(file, flags, group)
102 if not view.is_loading():
103 if was_already_open and (flags & sublime.SEMI_TRANSIENT):
104 # workaround bug https://github.com/sublimehq/sublime_text/issues/2411 where transient view might not get
105 # its view listeners initialized.
106 sublime_plugin.check_view_event_listeners(view) # type: ignore
107 # It's already loaded. Possibly already open in a tab.
108 return Promise.resolve(view)
109
110 # Is the view opening right now? Then return the associated unresolved promise
111 for fn, value in opening_files.items():
112 if fn == file or os.path.samefile(fn, file):
113 # Return the unresolved promise. A future on_load event will resolve the promise.
114 return value[0]
115
116 # Prepare a new promise to be resolved by a future on_load event (see the event listener in main.py)
117 def fullfill(resolve: ResolveFunc[Optional[sublime.View]]) -> None:
118 global opening_files
119 # Save the promise in the first element of the tuple -- except we cannot yet do that here
120 opening_files[file] = (None, resolve) # type: ignore
121
122 promise = Promise(fullfill)
123 tup = opening_files[file]
124 # Save the promise in the first element of the tuple so that the for-loop above can return it
125 opening_files[file] = (promise, tup[1])
126 return promise
127
128
129 def center_selection(v: sublime.View, r: Range) -> sublime.View:
130 selection = range_to_region(r, v)
131 v.run_command("lsp_selection_set", {"regions": [(selection.a, selection.a)]})
132 window = v.window()
133 if window:
134 window.focus_view(v)
135 if int(sublime.version()) >= 4124:
136 v.show_at_center(selection.begin(), animate=False)
137 else:
138 # TODO: remove later when a stable build lands
139 v.show_at_center(selection.begin()) # type: ignore
140 return v
141
142
143 def open_in_browser(uri: str) -> None:
144 # NOTE: Remove this check when on py3.8.
145 if not uri.lower().startswith(("http://", "https://")):
146 uri = "https://" + uri
147 if not webbrowser.open(uri):
148 sublime.status_message("failed to open: " + uri)
149
150
151 def open_externally(uri: str, take_focus: bool) -> bool:
152 """
153 A blocking function that invokes the OS's "open with default extension"
154 """
155 try:
156 # TODO: handle take_focus
157 if sublime.platform() == "windows":
158 os.startfile(uri) # type: ignore
159 elif sublime.platform() == "osx":
160 subprocess.check_call(("/usr/bin/open", uri))
161 else: # linux
162 subprocess.check_call(("xdg-open", uri))
163 return True
164 except Exception as ex:
165 exception_log("Failed to open {}".format(uri), ex)
166 return False
167
[end of plugin/core/open.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugin/core/open.py b/plugin/core/open.py
--- a/plugin/core/open.py
+++ b/plugin/core/open.py
@@ -1,3 +1,4 @@
+from .constants import ST_VERSION
from .logging import exception_log
from .promise import Promise
from .promise import ResolveFunc
@@ -79,7 +80,7 @@
view = window.active_view_in_group(_group)
if view and fname == view.file_name():
return view
- return window.find_open_file(fname, group)
+ return window.find_open_file(fname, group) if ST_VERSION >= 4136 else window.find_open_file(fname)
def open_file(
| {"golden_diff": "diff --git a/plugin/core/open.py b/plugin/core/open.py\n--- a/plugin/core/open.py\n+++ b/plugin/core/open.py\n@@ -1,3 +1,4 @@\n+from .constants import ST_VERSION\n from .logging import exception_log\n from .promise import Promise\n from .promise import ResolveFunc\n@@ -79,7 +80,7 @@\n view = window.active_view_in_group(_group)\n if view and fname == view.file_name():\n return view\n- return window.find_open_file(fname, group)\n+ return window.find_open_file(fname, group) if ST_VERSION >= 4136 else window.find_open_file(fname)\n \n \n def open_file(\n", "issue": "Goto Definition throws\n**Describe the bug**\r\nUsing `LSP-pyright`, click `Definition`. Nothing will happen visually.\r\n\r\n**To Reproduce**\r\n1. `LSP-pyright`\r\n2. hover over something\r\n3. click `Definition` (or `Type Definition` or `Declaration`)\r\n4. see error in console: \r\n```py\r\nTraceback (most recent call last):\r\n File \"D:\\Programs\\Sublime Text 4\\Data\\Installed Packages\\LSP.sublime-package\\plugin/core/sessions.py\", line 1681, in <lambda>\r\n File \"D:\\Programs\\Sublime Text 4\\Data\\Installed Packages\\LSP.sublime-package\\plugin/core/open.py\", line 96, in open_file\r\n File \"D:\\Programs\\Sublime Text 4\\Data\\Installed Packages\\LSP.sublime-package\\plugin/core/open.py\", line 82, in _find_open_file\r\nTypeError: find_open_file() takes 2 positional arguments but 3 were given\r\n```\r\n\r\n**Expected behavior**\r\nno error, I go to definition\r\n\r\n**Environment (please complete the following information):**\r\n- OS: windows\r\n- Sublime Text version: 4121\r\n- LSP version: 1.28.0\r\n- Language servers used: pyright-1.2.42\r\n\n", "before_files": [{"content": "from .logging import exception_log\nfrom .promise import Promise\nfrom .promise import ResolveFunc\nfrom .protocol import DocumentUri\nfrom .protocol import Range\nfrom .protocol import UINT_MAX\nfrom .typing import Dict, Tuple, Optional\nfrom .typing import cast\nfrom .url import parse_uri\nfrom .views import range_to_region\nfrom urllib.parse import unquote, urlparse\nimport os\nimport re\nimport sublime\nimport sublime_plugin\nimport subprocess\nimport webbrowser\n\n\nopening_files = {} # type: Dict[str, Tuple[Promise[Optional[sublime.View]], ResolveFunc[Optional[sublime.View]]]]\nFRAGMENT_PATTERN = re.compile(r'^L?(\\d+)(?:,(\\d+))?(?:-L?(\\d+)(?:,(\\d+))?)?')\n\n\ndef lsp_range_from_uri_fragment(fragment: str) -> Optional[Range]:\n match = FRAGMENT_PATTERN.match(fragment)\n if match:\n selection = {'start': {'line': 0, 'character': 0}, 'end': {'line': 0, 'character': 0}} # type: Range\n # Line and column numbers in the fragment are assumed to be 1-based and need to be converted to 0-based\n # numbers for the LSP Position structure.\n start_line, start_column, end_line, end_column = [max(0, int(g) - 1) if g else None for g in match.groups()]\n if start_line:\n selection['start']['line'] = start_line\n selection['end']['line'] = start_line\n if start_column:\n selection['start']['character'] = start_column\n selection['end']['character'] = start_column\n if end_line:\n selection['end']['line'] = end_line\n selection['end']['character'] = UINT_MAX\n if end_column is not None:\n selection['end']['character'] = end_column\n return selection\n return None\n\n\ndef open_file_uri(\n window: sublime.Window, uri: DocumentUri, flags: int = 0, group: int = -1\n) -> Promise[Optional[sublime.View]]:\n\n decoded_uri = unquote(uri) # decode percent-encoded characters\n parsed = urlparse(decoded_uri)\n open_promise = open_file(window, 
decoded_uri, flags, group)\n if parsed.fragment:\n selection = lsp_range_from_uri_fragment(parsed.fragment)\n if selection:\n return open_promise.then(lambda view: _select_and_center(view, cast(Range, selection)))\n return open_promise\n\n\ndef _select_and_center(view: Optional[sublime.View], r: Range) -> Optional[sublime.View]:\n if view:\n return center_selection(view, r)\n return None\n\n\ndef _return_existing_view(flags: int, existing_view_group: int, active_group: int, specified_group: int) -> bool:\n if specified_group > -1:\n return existing_view_group == specified_group\n if bool(flags & (sublime.ADD_TO_SELECTION | sublime.REPLACE_MRU)):\n return False\n if existing_view_group == active_group:\n return True\n return not bool(flags & sublime.FORCE_GROUP)\n\n\ndef _find_open_file(window: sublime.Window, fname: str, group: int = -1) -> Optional[sublime.View]:\n \"\"\"A replacement for Window.find_open_file that prefers the active view instead of the leftmost one.\"\"\"\n _group = window.active_group() if group == -1 else group\n view = window.active_view_in_group(_group)\n if view and fname == view.file_name():\n return view\n return window.find_open_file(fname, group)\n\n\ndef open_file(\n window: sublime.Window, uri: DocumentUri, flags: int = 0, group: int = -1\n) -> Promise[Optional[sublime.View]]:\n \"\"\"\n Open a file asynchronously.\n It is only safe to call this function from the UI thread.\n The provided uri MUST be a file URI\n \"\"\"\n file = parse_uri(uri)[1]\n # window.open_file brings the file to focus if it's already opened, which we don't want (unless it's supposed\n # to open as a separate view).\n view = _find_open_file(window, file)\n if view and _return_existing_view(flags, window.get_view_index(view)[0], window.active_group(), group):\n return Promise.resolve(view)\n\n was_already_open = view is not None\n view = window.open_file(file, flags, group)\n if not view.is_loading():\n if was_already_open and (flags & sublime.SEMI_TRANSIENT):\n # workaround bug https://github.com/sublimehq/sublime_text/issues/2411 where transient view might not get\n # its view listeners initialized.\n sublime_plugin.check_view_event_listeners(view) # type: ignore\n # It's already loaded. Possibly already open in a tab.\n return Promise.resolve(view)\n\n # Is the view opening right now? Then return the associated unresolved promise\n for fn, value in opening_files.items():\n if fn == file or os.path.samefile(fn, file):\n # Return the unresolved promise. 
A future on_load event will resolve the promise.\n return value[0]\n\n # Prepare a new promise to be resolved by a future on_load event (see the event listener in main.py)\n def fullfill(resolve: ResolveFunc[Optional[sublime.View]]) -> None:\n global opening_files\n # Save the promise in the first element of the tuple -- except we cannot yet do that here\n opening_files[file] = (None, resolve) # type: ignore\n\n promise = Promise(fullfill)\n tup = opening_files[file]\n # Save the promise in the first element of the tuple so that the for-loop above can return it\n opening_files[file] = (promise, tup[1])\n return promise\n\n\ndef center_selection(v: sublime.View, r: Range) -> sublime.View:\n selection = range_to_region(r, v)\n v.run_command(\"lsp_selection_set\", {\"regions\": [(selection.a, selection.a)]})\n window = v.window()\n if window:\n window.focus_view(v)\n if int(sublime.version()) >= 4124:\n v.show_at_center(selection.begin(), animate=False)\n else:\n # TODO: remove later when a stable build lands\n v.show_at_center(selection.begin()) # type: ignore\n return v\n\n\ndef open_in_browser(uri: str) -> None:\n # NOTE: Remove this check when on py3.8.\n if not uri.lower().startswith((\"http://\", \"https://\")):\n uri = \"https://\" + uri\n if not webbrowser.open(uri):\n sublime.status_message(\"failed to open: \" + uri)\n\n\ndef open_externally(uri: str, take_focus: bool) -> bool:\n \"\"\"\n A blocking function that invokes the OS's \"open with default extension\"\n \"\"\"\n try:\n # TODO: handle take_focus\n if sublime.platform() == \"windows\":\n os.startfile(uri) # type: ignore\n elif sublime.platform() == \"osx\":\n subprocess.check_call((\"/usr/bin/open\", uri))\n else: # linux\n subprocess.check_call((\"xdg-open\", uri))\n return True\n except Exception as ex:\n exception_log(\"Failed to open {}\".format(uri), ex)\n return False\n", "path": "plugin/core/open.py"}]} | 2,803 | 146 |
gh_patches_debug_3017 | rasdani/github-patches | git_diff | secdev__scapy-2255 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tcpdump check error in centos
#### Brief description
> I have installed tcpdump in PATH, but it reports:
scapy.error.Scapy_Exception: tcpdump is not available. Cannot use filter !
I found the code which checks tcpdump in /opt/rh/rh-python36/root/usr/lib/python3.6/site-packages/scapy/arch/common.py:
```
def _check_tcpdump():
"""
Return True if the tcpdump command can be started
"""
with open(os.devnull, 'wb') as devnull:
try:
proc = subprocess.Popen([conf.prog.tcpdump, "--version"],
stdout=devnull, stderr=subprocess.STDOUT)
except OSError:
return False
return proc.wait() == 0
```
The error is that `tcpdump --version` returns 1 instead of 0, e.g.:
```
[root@localhost proxy]# tcpdump --version
tcpdump version 4.1-PRE-CVS_2017_03_21
libpcap version 1.4.0
Usage: tcpdump [-aAdDefhIJKlLnNOpqRStuUvxX] [ -B size ] [ -c count ]
[ -C file_size ] [ -E algo:secret ] [ -F file ] [ -G seconds ]
[ -i interface ] [ -j tstamptype ] [ -M secret ]
[ -Q|-P in|out|inout ]
[ -r file ] [ -s snaplen ] [ -T type ] [ -w file ]
[ -W filecount ] [ -y datalinktype ] [ -z command ]
[ -Z user ] [ expression ]
[root@localhost proxy]# echo $?
1
```
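
In other words, this tcpdump build prints its version and usage text but exits with status 1, so a check based only on the exit code reports it as unavailable. A rough sketch of a more tolerant check follows; treating output that starts with the version or usage banner as success is an assumption based on the output shown above, not part of this report.

```python
import subprocess

def check_tcpdump(tcpdump_path="tcpdump"):
    # Sketch: consider tcpdump usable if "--version" runs at all, even when
    # old builds exit non-zero and print their usage text instead.
    try:
        proc = subprocess.Popen([tcpdump_path, "--version"],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        output = proc.communicate()[0]
    except OSError:
        return False
    # Accepted prefixes are assumptions drawn from the output shown above.
    return proc.returncode == 0 or output.startswith(
        (b"tcpdump version", b"Usage: tcpdump"))
```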
#### Environment
```
[root@localhost proxy]# python3.6 --version
Python 3.6.3
[root@localhost proxy]# pip3.6 freeze
certifi==2018.11.29
chardet==3.0.4
idna==2.8
protobuf==3.6.1
psutil==5.4.8
PyMySQL==0.9.3
redis==3.0.1
requests==2.21.0
s8-protocol==1.0
scapy==2.4.2
six==1.11.0
snakeMQ==1.6
urllib3==1.24.1
virtualenv==15.1.0
xlrd==1.2.0
You are using pip version 9.0.1, however version 19.0.2 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
[root@localhost proxy]# uname -a
Linux localhost.localdomain 2.6.32-431.el6.x86_64 #1 SMP Fri Nov 22 03:15:09 UTC 2013 x86_64 x86_64 x86_64 GNU/Linux
[root@localhost proxy]# cat /etc/issue
CentOS release 6.5 (Final)
Kernel \r on an \m
```
</issue>
<code>
[start of scapy/arch/common.py]
1 # This file is part of Scapy
2 # See http://www.secdev.org/projects/scapy for more information
3 # Copyright (C) Philippe Biondi <[email protected]>
4 # This program is published under a GPLv2 license
5
6 """
7 Functions common to different architectures
8 """
9
10 import ctypes
11 import os
12 import socket
13 import struct
14 import subprocess
15 import time
16 from ctypes import POINTER, Structure
17 from ctypes import c_uint, c_uint32, c_ushort, c_ubyte
18 from scapy.consts import WINDOWS
19 from scapy.config import conf
20 from scapy.data import MTU
21 from scapy.error import Scapy_Exception
22 import scapy.modules.six as six
23
24 if not WINDOWS:
25 from fcntl import ioctl
26
27 # BOOT
28
29
30 def _check_tcpdump():
31 """
32 Return True if the tcpdump command can be started
33 """
34 try:
35 proc = subprocess.Popen(
36 [conf.prog.tcpdump, "--version"],
37 stdout=subprocess.PIPE,
38 stderr=subprocess.STDOUT
39 )
40 output = proc.communicate()[0]
41 except OSError:
42 return False
43
44 # On some systems, --version does not exist on tcpdump
45 return proc.returncode == 0 or output.startswith(b'Usage: tcpdump ')
46
47
48 # This won't be used on Windows
49 TCPDUMP = WINDOWS or _check_tcpdump()
50
51 # UTILS
52
53
54 def get_if(iff, cmd):
55 """Ease SIOCGIF* ioctl calls"""
56
57 sck = socket.socket()
58 ifreq = ioctl(sck, cmd, struct.pack("16s16x", iff.encode("utf8")))
59 sck.close()
60 return ifreq
61
62
63 def get_if_raw_hwaddr(iff):
64 """Get the raw MAC address of a local interface.
65
66 This function uses SIOCGIFHWADDR calls, therefore only works
67 on some distros.
68
69 :param iff: the network interface name as a string
70 :returns: the corresponding raw MAC address
71 """
72 from scapy.arch import SIOCGIFHWADDR
73 return struct.unpack("16xh6s8x", get_if(iff, SIOCGIFHWADDR))
74
75 # SOCKET UTILS
76
77
78 def _select_nonblock(sockets, remain=None):
79 """This function is called during sendrecv() routine to select
80 the available sockets.
81 """
82 # pcap sockets aren't selectable, so we return all of them
83 # and ask the selecting functions to use nonblock_recv instead of recv
84 def _sleep_nonblock_recv(self):
85 res = self.nonblock_recv()
86 if res is None:
87 time.sleep(conf.recv_poll_rate)
88 return res
89 # we enforce remain=None: don't wait.
90 return sockets, _sleep_nonblock_recv
91
92 # BPF HANDLERS
93
94
95 class bpf_insn(Structure):
96 """"The BPF instruction data structure"""
97 _fields_ = [("code", c_ushort),
98 ("jt", c_ubyte),
99 ("jf", c_ubyte),
100 ("k", c_uint32)]
101
102
103 class bpf_program(Structure):
104 """"Structure for BIOCSETF"""
105 _fields_ = [("bf_len", c_uint),
106 ("bf_insns", POINTER(bpf_insn))]
107
108
109 def _legacy_bpf_pointer(tcpdump_lines):
110 """Get old-format BPF Pointer. Deprecated"""
111 X86_64 = os.uname()[4] in ['x86_64', 'aarch64']
112 size = int(tcpdump_lines[0])
113 bpf = b""
114 for l in tcpdump_lines[1:]:
115 if six.PY2:
116 int_type = long # noqa: F821
117 else:
118 int_type = int
119 bpf += struct.pack("HBBI", *map(int_type, l.split()))
120
121 # Thanks to http://www.netprojects.de/scapy-with-pypy-solved/ for the pypy trick # noqa: E501
122 if conf.use_pypy:
123 str_buffer = ctypes.create_string_buffer(bpf)
124 return struct.pack('HL', size, ctypes.addressof(str_buffer))
125 else:
126 # XXX. Argl! We need to give the kernel a pointer on the BPF,
127 # Python object header seems to be 20 bytes. 36 bytes for x86 64bits arch. # noqa: E501
128 if X86_64:
129 return struct.pack("HL", size, id(bpf) + 36)
130 else:
131 return struct.pack("HI", size, id(bpf) + 20)
132
133
134 def get_bpf_pointer(tcpdump_lines):
135 """Create a BPF Pointer for TCPDump filter"""
136 if conf.use_pypy:
137 return _legacy_bpf_pointer(tcpdump_lines)
138
139 # Allocate BPF instructions
140 size = int(tcpdump_lines[0])
141 bpf_insn_a = bpf_insn * size
142 bip = bpf_insn_a()
143
144 # Fill the BPF instruction structures with the byte code
145 tcpdump_lines = tcpdump_lines[1:]
146 i = 0
147 for line in tcpdump_lines:
148 values = [int(v) for v in line.split()]
149 bip[i].code = c_ushort(values[0])
150 bip[i].jt = c_ubyte(values[1])
151 bip[i].jf = c_ubyte(values[2])
152 bip[i].k = c_uint(values[3])
153 i += 1
154
155 # Create the BPF program
156 return bpf_program(size, bip)
157
158
159 def compile_filter(bpf_filter, iface=None):
160 """Asks Tcpdump to parse the filter, then build the matching
161 BPF bytecode using get_bpf_pointer.
162 """
163 if not TCPDUMP:
164 raise Scapy_Exception("tcpdump is not available. Cannot use filter !")
165 try:
166 process = subprocess.Popen([
167 conf.prog.tcpdump,
168 "-p",
169 "-i", (conf.iface if iface is None else iface),
170 "-ddd",
171 "-s", str(MTU),
172 bpf_filter],
173 stdout=subprocess.PIPE,
174 stderr=subprocess.PIPE
175 )
176 except OSError as ex:
177 raise Scapy_Exception("Failed to attach filter: %s" % ex)
178 lines, err = process.communicate()
179 ret = process.returncode
180 if ret:
181 raise Scapy_Exception(
182 "Failed to attach filter: tcpdump returned: %s" % err
183 )
184 lines = lines.strip().split(b"\n")
185 return get_bpf_pointer(lines)
186
[end of scapy/arch/common.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scapy/arch/common.py b/scapy/arch/common.py
--- a/scapy/arch/common.py
+++ b/scapy/arch/common.py
@@ -42,7 +42,9 @@
return False
# On some systems, --version does not exist on tcpdump
- return proc.returncode == 0 or output.startswith(b'Usage: tcpdump ')
+ return proc.returncode == 0 \
+ or output.startswith(b'Usage: tcpdump ') \
+ or output.startswith(b'tcpdump: unrecognized option')
# This won't be used on Windows
| {"golden_diff": "diff --git a/scapy/arch/common.py b/scapy/arch/common.py\n--- a/scapy/arch/common.py\n+++ b/scapy/arch/common.py\n@@ -42,7 +42,9 @@\n return False\n \n # On some systems, --version does not exist on tcpdump\n- return proc.returncode == 0 or output.startswith(b'Usage: tcpdump ')\n+ return proc.returncode == 0 \\\n+ or output.startswith(b'Usage: tcpdump ') \\\n+ or output.startswith(b'tcpdump: unrecognized option')\n \n \n # This won't be used on Windows\n", "issue": "tcpdump check error in centos\n#### Brief description\r\n\r\n> I have installed tcpdump in PATH, but it reports:\r\nscapy.error.Scapy_Exception: tcpdump is not available. Cannot use filter !\r\nI found the code which check tcudmp in /opt/rh/rh-python36/root/usr/lib/python3.6/site-packages/scapy/arch/common.py:\r\n\r\n```\r\ndef _check_tcpdump():\r\n \"\"\"\r\n Return True if the tcpdump command can be started\r\n \"\"\"\r\n with open(os.devnull, 'wb') as devnull:\r\n try:\r\n proc = subprocess.Popen([conf.prog.tcpdump, \"--version\"],\r\n stdout=devnull, stderr=subprocess.STDOUT)\r\n except OSError:\r\n return False\r\n return proc.wait() == 0\r\n```\r\n\r\n\r\nthe error is that tcpdump --version return 1 instead of 0\r\neg:\r\n```\r\n[root@localhost proxy]# tcpdump --version\r\ntcpdump version 4.1-PRE-CVS_2017_03_21\r\nlibpcap version 1.4.0\r\nUsage: tcpdump [-aAdDefhIJKlLnNOpqRStuUvxX] [ -B size ] [ -c count ]\r\n\t\t[ -C file_size ] [ -E algo:secret ] [ -F file ] [ -G seconds ]\r\n\t\t[ -i interface ] [ -j tstamptype ] [ -M secret ]\r\n\t\t[ -Q|-P in|out|inout ]\r\n\t\t[ -r file ] [ -s snaplen ] [ -T type ] [ -w file ]\r\n\t\t[ -W filecount ] [ -y datalinktype ] [ -z command ]\r\n\t\t[ -Z user ] [ expression ]\r\n[root@localhost proxy]# echo $?\r\n1\r\n```\r\n\r\n#### Environment\r\n\r\n```\r\n[root@localhost proxy]# python3.6 --version\r\nPython 3.6.3\r\n[root@localhost proxy]# pip3.6 freeze\r\ncertifi==2018.11.29\r\nchardet==3.0.4\r\nidna==2.8\r\nprotobuf==3.6.1\r\npsutil==5.4.8\r\nPyMySQL==0.9.3\r\nredis==3.0.1\r\nrequests==2.21.0\r\ns8-protocol==1.0\r\nscapy==2.4.2\r\nsix==1.11.0\r\nsnakeMQ==1.6\r\nurllib3==1.24.1\r\nvirtualenv==15.1.0\r\nxlrd==1.2.0\r\nYou are using pip version 9.0.1, however version 19.0.2 is available.\r\nYou should consider upgrading via the 'pip install --upgrade pip' command.\r\n[root@localhost proxy]# uname -a\r\nLinux localhost.localdomain 2.6.32-431.el6.x86_64 #1 SMP Fri Nov 22 03:15:09 UTC 2013 x86_64 x86_64 x86_64 GNU/Linux\r\n[root@localhost proxy]# cat /etc/issue\r\nCentOS release 6.5 (Final)\r\nKernel \\r on an \\m\r\n```\r\n\n", "before_files": [{"content": "# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more information\n# Copyright (C) Philippe Biondi <[email protected]>\n# This program is published under a GPLv2 license\n\n\"\"\"\nFunctions common to different architectures\n\"\"\"\n\nimport ctypes\nimport os\nimport socket\nimport struct\nimport subprocess\nimport time\nfrom ctypes import POINTER, Structure\nfrom ctypes import c_uint, c_uint32, c_ushort, c_ubyte\nfrom scapy.consts import WINDOWS\nfrom scapy.config import conf\nfrom scapy.data import MTU\nfrom scapy.error import Scapy_Exception\nimport scapy.modules.six as six\n\nif not WINDOWS:\n from fcntl import ioctl\n\n# BOOT\n\n\ndef _check_tcpdump():\n \"\"\"\n Return True if the tcpdump command can be started\n \"\"\"\n try:\n proc = subprocess.Popen(\n [conf.prog.tcpdump, \"--version\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n output = proc.communicate()[0]\n 
except OSError:\n return False\n\n # On some systems, --version does not exist on tcpdump\n return proc.returncode == 0 or output.startswith(b'Usage: tcpdump ')\n\n\n# This won't be used on Windows\nTCPDUMP = WINDOWS or _check_tcpdump()\n\n# UTILS\n\n\ndef get_if(iff, cmd):\n \"\"\"Ease SIOCGIF* ioctl calls\"\"\"\n\n sck = socket.socket()\n ifreq = ioctl(sck, cmd, struct.pack(\"16s16x\", iff.encode(\"utf8\")))\n sck.close()\n return ifreq\n\n\ndef get_if_raw_hwaddr(iff):\n \"\"\"Get the raw MAC address of a local interface.\n\n This function uses SIOCGIFHWADDR calls, therefore only works\n on some distros.\n\n :param iff: the network interface name as a string\n :returns: the corresponding raw MAC address\n \"\"\"\n from scapy.arch import SIOCGIFHWADDR\n return struct.unpack(\"16xh6s8x\", get_if(iff, SIOCGIFHWADDR))\n\n# SOCKET UTILS\n\n\ndef _select_nonblock(sockets, remain=None):\n \"\"\"This function is called during sendrecv() routine to select\n the available sockets.\n \"\"\"\n # pcap sockets aren't selectable, so we return all of them\n # and ask the selecting functions to use nonblock_recv instead of recv\n def _sleep_nonblock_recv(self):\n res = self.nonblock_recv()\n if res is None:\n time.sleep(conf.recv_poll_rate)\n return res\n # we enforce remain=None: don't wait.\n return sockets, _sleep_nonblock_recv\n\n# BPF HANDLERS\n\n\nclass bpf_insn(Structure):\n \"\"\"\"The BPF instruction data structure\"\"\"\n _fields_ = [(\"code\", c_ushort),\n (\"jt\", c_ubyte),\n (\"jf\", c_ubyte),\n (\"k\", c_uint32)]\n\n\nclass bpf_program(Structure):\n \"\"\"\"Structure for BIOCSETF\"\"\"\n _fields_ = [(\"bf_len\", c_uint),\n (\"bf_insns\", POINTER(bpf_insn))]\n\n\ndef _legacy_bpf_pointer(tcpdump_lines):\n \"\"\"Get old-format BPF Pointer. Deprecated\"\"\"\n X86_64 = os.uname()[4] in ['x86_64', 'aarch64']\n size = int(tcpdump_lines[0])\n bpf = b\"\"\n for l in tcpdump_lines[1:]:\n if six.PY2:\n int_type = long # noqa: F821\n else:\n int_type = int\n bpf += struct.pack(\"HBBI\", *map(int_type, l.split()))\n\n # Thanks to http://www.netprojects.de/scapy-with-pypy-solved/ for the pypy trick # noqa: E501\n if conf.use_pypy:\n str_buffer = ctypes.create_string_buffer(bpf)\n return struct.pack('HL', size, ctypes.addressof(str_buffer))\n else:\n # XXX. Argl! We need to give the kernel a pointer on the BPF,\n # Python object header seems to be 20 bytes. 36 bytes for x86 64bits arch. # noqa: E501\n if X86_64:\n return struct.pack(\"HL\", size, id(bpf) + 36)\n else:\n return struct.pack(\"HI\", size, id(bpf) + 20)\n\n\ndef get_bpf_pointer(tcpdump_lines):\n \"\"\"Create a BPF Pointer for TCPDump filter\"\"\"\n if conf.use_pypy:\n return _legacy_bpf_pointer(tcpdump_lines)\n\n # Allocate BPF instructions\n size = int(tcpdump_lines[0])\n bpf_insn_a = bpf_insn * size\n bip = bpf_insn_a()\n\n # Fill the BPF instruction structures with the byte code\n tcpdump_lines = tcpdump_lines[1:]\n i = 0\n for line in tcpdump_lines:\n values = [int(v) for v in line.split()]\n bip[i].code = c_ushort(values[0])\n bip[i].jt = c_ubyte(values[1])\n bip[i].jf = c_ubyte(values[2])\n bip[i].k = c_uint(values[3])\n i += 1\n\n # Create the BPF program\n return bpf_program(size, bip)\n\n\ndef compile_filter(bpf_filter, iface=None):\n \"\"\"Asks Tcpdump to parse the filter, then build the matching\n BPF bytecode using get_bpf_pointer.\n \"\"\"\n if not TCPDUMP:\n raise Scapy_Exception(\"tcpdump is not available. 
Cannot use filter !\")\n try:\n process = subprocess.Popen([\n conf.prog.tcpdump,\n \"-p\",\n \"-i\", (conf.iface if iface is None else iface),\n \"-ddd\",\n \"-s\", str(MTU),\n bpf_filter],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n except OSError as ex:\n raise Scapy_Exception(\"Failed to attach filter: %s\" % ex)\n lines, err = process.communicate()\n ret = process.returncode\n if ret:\n raise Scapy_Exception(\n \"Failed to attach filter: tcpdump returned: %s\" % err\n )\n lines = lines.strip().split(b\"\\n\")\n return get_bpf_pointer(lines)\n", "path": "scapy/arch/common.py"}]} | 3,123 | 128 |
gh_patches_debug_17107 | rasdani/github-patches | git_diff | nvaccess__nvda-10947 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Winamp support: NVDA doesn't detect the status of shuffle and repeat modes
<!--
Please thoroughly read NVDA's wiki article on how to fill in this template, including how to provide the required files.
Issues may be closed if the required information is not present.
https://github.com/nvaccess/nvda/wiki/Github-issue-template-explanation-and-examples
-->
### Steps to reproduce:
1. Start winamp
2. Press r or s to toggle shuffle or repeat
3. NVDA will say "off" every time you toggle those modes
### Actual behavior:
NVDA will say "off" every time you toggle those modes
##
### Expected behavior:
NVDA needs to state if setting is on or off.
### System configuration
#### NVDA installed/portable/running from source:
Installed
#### NVDA version:
2019.3.1
#### Windows version:
Windows10 latest
#### Name and version of other software in use when reproducing the issue:
Winamp 5.666 Build 3516
#### Other information about your system:
### Other questions
#### Does the issue still occur after restarting your computer?
Yes
#### Have you tried any other versions of NVDA? If so, please report their behaviors.
No
#### If addons are disabled, is your problem still occuring?
Yes
#### Did you try to run the COM registry fixing tool in NVDA menu / tools?
No
</issue>
<code>
[start of source/appModules/winamp.py]
1 #appModules/winamp.py
2 #A part of NonVisual Desktop Access (NVDA)
3 #Copyright (C) 2006-2012 NVDA Contributors
4 #This file is covered by the GNU General Public License.
5 #See the file COPYING for more details.
6
7 from ctypes import *
8 from ctypes.wintypes import *
9 import winKernel
10 import winUser
11 from scriptHandler import isScriptWaiting
12 from NVDAObjects.IAccessible import IAccessible
13 import appModuleHandler
14 import speech
15 import locale
16 import controlTypes
17 import api
18 import watchdog
19 import braille
20 import ui
21
22 # message used to sent many messages to winamp's main window.
23 # most all of the IPC_* messages involve sending the message in the form of:
24 # result = SendMessage(hwnd_winamp,WM_WA_IPC,(parameter),IPC_*);
25
26 WM_WA_IPC=winUser.WM_USER
27
28 # winamp window
29 IPC_GET_SHUFFLE=250
30 IPC_GET_REPEAT=251
31
32 # playlist editor
33 IPC_PLAYLIST_GET_NEXT_SELECTED=3029
34 IPC_PE_GETCURINDEX=100
35 IPC_PE_GETINDEXTOTAL=101
36 # in_process ONLY
37 IPC_PE_GETINDEXTITLE=200 # lParam = pointer to fileinfo2 structure
38
39 class fileinfo2(Structure):
40 _fields_=[
41 ('fileindex',c_int),
42 ('filetitle',c_char*256),
43 ('filelength',c_char*16),
44 ]
45
46 hwndWinamp=0
47
48 def getShuffle():
49 global hwndWinamp
50 return watchdog.cancellableSendMessage(hwndWinamp,WM_WA_IPC,0,IPC_GET_SHUFFLE)
51
52 def getRepeat():
53 global hwndWinamp
54 return watchdog.cancellableSendMessage(hwndWinamp,WM_WA_IPC,0,IPC_GET_REPEAT)
55
56 class AppModule(appModuleHandler.AppModule):
57
58 def event_NVDAObject_init(self,obj):
59 global hwndWinamp
60 hwndWinamp=windll.user32.FindWindowA("Winamp v1.x",None)
61
62 def chooseNVDAObjectOverlayClasses(self, obj, clsList):
63 windowClass = obj.windowClassName
64 if windowClass == "Winamp PE":
65 clsList.insert(0, winampPlaylistEditor)
66 elif windowClass == "Winamp v1.x":
67 clsList.insert(0, winampMainWindow)
68
69 class winampMainWindow(IAccessible):
70
71 def event_nameChange(self):
72 pass
73
74 def script_shuffleToggle(self,gesture):
75 gesture.send()
76 if not isScriptWaiting():
77 api.processPendingEvents()
78 if getShuffle():
79 # Translators: the user has pressed the shuffle tracks toggle in winamp, shuffle is now on.
80 onOff=pgettext("shuffle", "on")
81 else:
82 # Translators: the user has pressed the shuffle tracks toggle in winamp, shuffle is now off.
83 onOff=pgettext("shuffle", "off")
84 ui.message(onOff)
85
86 def script_repeatToggle(self,gesture):
87 gesture.send()
88 if not isScriptWaiting():
89 api.processPendingEvents()
90 if getRepeat():
91 # Translators: the user has pressed the repeat track toggle in winamp, repeat is now on.
92 onOff=pgettext("repeat", "on")
93 else:
94 # Translators: the user has pressed the repeat track toggle in winamp, repeat is now off.
95 onOff=pgettext("repeat", "off")
96 ui.message(onOff)
97
98 __gestures = {
99 "kb:s": "shuffleToggle",
100 "kb:r": "repeatToggle",
101 }
102
103 class winampPlaylistEditor(winampMainWindow):
104
105 def _get_name(self):
106 curIndex=watchdog.cancellableSendMessage(hwndWinamp,WM_WA_IPC,-1,IPC_PLAYLIST_GET_NEXT_SELECTED)
107 if curIndex <0:
108 return None
109 info=fileinfo2()
110 info.fileindex=curIndex
111 internalInfo=winKernel.virtualAllocEx(self.processHandle,None,sizeof(info),winKernel.MEM_COMMIT,winKernel.PAGE_READWRITE)
112 try:
113 winKernel.writeProcessMemory(self.processHandle,internalInfo,byref(info),sizeof(info),None)
114 watchdog.cancellableSendMessage(self.windowHandle,WM_WA_IPC,IPC_PE_GETINDEXTITLE,internalInfo)
115 winKernel.readProcessMemory(self.processHandle,internalInfo,byref(info),sizeof(info),None)
116 finally:
117 winKernel.virtualFreeEx(self.processHandle,internalInfo,0,winKernel.MEM_RELEASE)
118 # file title is fetched in the current locale encoding.
119 # We need to decode it to unicode first.
120 encoding=locale.getlocale()[1]
121 fileTitle=info.filetitle.decode(encoding,errors="replace")
122 return "%d.\t%s\t%s"%(curIndex+1,fileTitle,info.filelength)
123
124 def _get_role(self):
125 return controlTypes.ROLE_LISTITEM
126
127 def script_changeItem(self,gesture):
128 gesture.send()
129 if not isScriptWaiting():
130 api.processPendingEvents()
131 speech.speakObject(self,reason=controlTypes.REASON_FOCUS)
132 braille.handler.handleGainFocus(self)
133
134 def event_nameChange(self):
135 return super(winampMainWindow,self).event_nameChange()
136
137 __changeItemGestures = (
138 "kb:upArrow",
139 "kb:downArrow",
140 "kb:pageUp",
141 "kb:pageDown",
142 )
143
144 def initOverlayClass(self):
145 for gesture in self.__changeItemGestures:
146 self.bindGesture(gesture, "changeItem")
147
[end of source/appModules/winamp.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/source/appModules/winamp.py b/source/appModules/winamp.py
--- a/source/appModules/winamp.py
+++ b/source/appModules/winamp.py
@@ -1,8 +1,7 @@
-#appModules/winamp.py
-#A part of NonVisual Desktop Access (NVDA)
-#Copyright (C) 2006-2012 NVDA Contributors
-#This file is covered by the GNU General Public License.
-#See the file COPYING for more details.
+# A part of NonVisual Desktop Access (NVDA)
+# Copyright (C) 2006-2020 NV Access Limited
+# This file is covered by the GNU General Public License.
+# See the file COPYING for more details.
from ctypes import *
from ctypes.wintypes import *
@@ -57,7 +56,7 @@
def event_NVDAObject_init(self,obj):
global hwndWinamp
- hwndWinamp=windll.user32.FindWindowA("Winamp v1.x",None)
+ hwndWinamp = winUser.FindWindow("Winamp v1.x", None)
def chooseNVDAObjectOverlayClasses(self, obj, clsList):
windowClass = obj.windowClassName
| {"golden_diff": "diff --git a/source/appModules/winamp.py b/source/appModules/winamp.py\n--- a/source/appModules/winamp.py\n+++ b/source/appModules/winamp.py\n@@ -1,8 +1,7 @@\n-#appModules/winamp.py\r\n-#A part of NonVisual Desktop Access (NVDA)\r\n-#Copyright (C) 2006-2012 NVDA Contributors\r\n-#This file is covered by the GNU General Public License.\r\n-#See the file COPYING for more details.\r\n+# A part of NonVisual Desktop Access (NVDA)\r\n+# Copyright (C) 2006-2020 NV Access Limited\r\n+# This file is covered by the GNU General Public License.\r\n+# See the file COPYING for more details.\r\n \r\n from ctypes import *\r\n from ctypes.wintypes import *\r\n@@ -57,7 +56,7 @@\n \r\n \tdef event_NVDAObject_init(self,obj):\r\n \t\tglobal hwndWinamp\r\n-\t\thwndWinamp=windll.user32.FindWindowA(\"Winamp v1.x\",None)\r\n+\t\thwndWinamp = winUser.FindWindow(\"Winamp v1.x\", None)\r\n \r\n \tdef chooseNVDAObjectOverlayClasses(self, obj, clsList):\r\n \t\twindowClass = obj.windowClassName\n", "issue": "Winamp support: NVDA doesn't detect the status of shuffle and repeat modes\n<!--\r\nPlease thoroughly read NVDA's wiki article on how to fill in this template, including how to provide the required files.\r\nIssues may be closed if the required information is not present.\r\nhttps://github.com/nvaccess/nvda/wiki/Github-issue-template-explanation-and-examples\r\n-->\r\n\r\n### Steps to reproduce:\r\n1. Start winamp\r\n2. Press r or s toggle shuffle or repeat\r\n3. NVDA will say \"off\" every time you toggle those modes\r\n### Actual behavior:\r\nNVDA will say \"off\" every time you toggle those modes\r\n##\r\n### Expected behavior:\r\nNVDA needs to state if setting is on or off.\r\n### System configuration\r\n#### NVDA installed/portable/running from source:\r\nInstalled\r\n#### NVDA version:\r\n2019.3.1\r\n#### Windows version:\r\nWindows10 latest\r\n\r\n#### Name and version of other software in use when reproducing the issue:\r\nWinamp 5.666 Build 3516\r\n#### Other information about your system:\r\n\r\n### Other questions\r\n#### Does the issue still occur after restarting your computer?\r\nYes\r\n#### Have you tried any other versions of NVDA? If so, please report their behaviors.\r\nNo\r\n#### If addons are disabled, is your problem still occuring?\r\nYes\r\n#### Did you try to run the COM registry fixing tool in NVDA menu / tools?\r\nNo\n", "before_files": [{"content": "#appModules/winamp.py\r\n#A part of NonVisual Desktop Access (NVDA)\r\n#Copyright (C) 2006-2012 NVDA Contributors\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n\r\nfrom ctypes import *\r\nfrom ctypes.wintypes import *\r\nimport winKernel\r\nimport winUser\r\nfrom scriptHandler import isScriptWaiting\r\nfrom NVDAObjects.IAccessible import IAccessible \r\nimport appModuleHandler\r\nimport speech\r\nimport locale\r\nimport controlTypes\r\nimport api\r\nimport watchdog\r\nimport braille\r\nimport ui\r\n\r\n# message used to sent many messages to winamp's main window. 
\r\n# most all of the IPC_* messages involve sending the message in the form of:\r\n# result = SendMessage(hwnd_winamp,WM_WA_IPC,(parameter),IPC_*);\r\n\r\nWM_WA_IPC=winUser.WM_USER\r\n\r\n# winamp window\r\nIPC_GET_SHUFFLE=250\r\nIPC_GET_REPEAT=251\r\n\r\n# playlist editor\r\nIPC_PLAYLIST_GET_NEXT_SELECTED=3029\r\nIPC_PE_GETCURINDEX=100\r\nIPC_PE_GETINDEXTOTAL=101\r\n# in_process ONLY\r\nIPC_PE_GETINDEXTITLE=200 # lParam = pointer to fileinfo2 structure\r\n\r\nclass fileinfo2(Structure):\r\n\t_fields_=[\r\n\t\t('fileindex',c_int),\r\n\t\t('filetitle',c_char*256),\r\n\t\t('filelength',c_char*16),\r\n\t]\r\n\r\nhwndWinamp=0\r\n\r\ndef getShuffle():\r\n\tglobal hwndWinamp\r\n\treturn watchdog.cancellableSendMessage(hwndWinamp,WM_WA_IPC,0,IPC_GET_SHUFFLE)\r\n\r\ndef getRepeat():\r\n\tglobal hwndWinamp\r\n\treturn watchdog.cancellableSendMessage(hwndWinamp,WM_WA_IPC,0,IPC_GET_REPEAT)\r\n\r\nclass AppModule(appModuleHandler.AppModule):\r\n\r\n\tdef event_NVDAObject_init(self,obj):\r\n\t\tglobal hwndWinamp\r\n\t\thwndWinamp=windll.user32.FindWindowA(\"Winamp v1.x\",None)\r\n\r\n\tdef chooseNVDAObjectOverlayClasses(self, obj, clsList):\r\n\t\twindowClass = obj.windowClassName\r\n\t\tif windowClass == \"Winamp PE\":\r\n\t\t\tclsList.insert(0, winampPlaylistEditor)\r\n\t\telif windowClass == \"Winamp v1.x\":\r\n\t\t\tclsList.insert(0, winampMainWindow)\r\n\r\nclass winampMainWindow(IAccessible):\r\n\r\n\tdef event_nameChange(self):\r\n\t\tpass\r\n\r\n\tdef script_shuffleToggle(self,gesture):\r\n\t\tgesture.send()\r\n\t\tif not isScriptWaiting():\r\n\t\t\tapi.processPendingEvents()\r\n\t\t\tif getShuffle():\r\n\t\t\t\t# Translators: the user has pressed the shuffle tracks toggle in winamp, shuffle is now on.\r\n\t\t\t\tonOff=pgettext(\"shuffle\", \"on\")\r\n\t\t\telse:\r\n\t\t\t\t# Translators: the user has pressed the shuffle tracks toggle in winamp, shuffle is now off.\r\n\t\t\t\tonOff=pgettext(\"shuffle\", \"off\")\r\n\t\t\tui.message(onOff)\r\n\r\n\tdef script_repeatToggle(self,gesture):\r\n\t\tgesture.send()\r\n\t\tif not isScriptWaiting():\r\n\t\t\tapi.processPendingEvents()\r\n\t\t\tif getRepeat():\r\n\t\t\t\t# Translators: the user has pressed the repeat track toggle in winamp, repeat is now on.\r\n\t\t\t\tonOff=pgettext(\"repeat\", \"on\")\r\n\t\t\telse:\r\n\t\t\t\t# Translators: the user has pressed the repeat track toggle in winamp, repeat is now off.\r\n\t\t\t\tonOff=pgettext(\"repeat\", \"off\")\r\n\t\t\tui.message(onOff)\r\n\r\n\t__gestures = {\r\n\t\t\"kb:s\": \"shuffleToggle\",\r\n\t\t\"kb:r\": \"repeatToggle\",\r\n\t}\r\n\r\nclass winampPlaylistEditor(winampMainWindow):\r\n\r\n\tdef _get_name(self):\r\n\t\tcurIndex=watchdog.cancellableSendMessage(hwndWinamp,WM_WA_IPC,-1,IPC_PLAYLIST_GET_NEXT_SELECTED)\r\n\t\tif curIndex <0:\r\n\t\t\treturn None\r\n\t\tinfo=fileinfo2()\r\n\t\tinfo.fileindex=curIndex\r\n\t\tinternalInfo=winKernel.virtualAllocEx(self.processHandle,None,sizeof(info),winKernel.MEM_COMMIT,winKernel.PAGE_READWRITE)\r\n\t\ttry:\r\n\t\t\twinKernel.writeProcessMemory(self.processHandle,internalInfo,byref(info),sizeof(info),None)\r\n\t\t\twatchdog.cancellableSendMessage(self.windowHandle,WM_WA_IPC,IPC_PE_GETINDEXTITLE,internalInfo)\r\n\t\t\twinKernel.readProcessMemory(self.processHandle,internalInfo,byref(info),sizeof(info),None)\r\n\t\tfinally:\r\n\t\t\twinKernel.virtualFreeEx(self.processHandle,internalInfo,0,winKernel.MEM_RELEASE)\r\n\t\t# file title is fetched in the current locale encoding.\r\n\t\t# We need to decode it to unicode first. 
\r\n\t\tencoding=locale.getlocale()[1]\r\n\t\tfileTitle=info.filetitle.decode(encoding,errors=\"replace\")\r\n\t\treturn \"%d.\\t%s\\t%s\"%(curIndex+1,fileTitle,info.filelength)\r\n\r\n\tdef _get_role(self):\r\n\t\treturn controlTypes.ROLE_LISTITEM\r\n\r\n\tdef script_changeItem(self,gesture):\r\n\t\tgesture.send()\r\n\t\tif not isScriptWaiting():\r\n\t\t\tapi.processPendingEvents()\r\n\t\t\tspeech.speakObject(self,reason=controlTypes.REASON_FOCUS)\r\n\t\t\tbraille.handler.handleGainFocus(self)\r\n\r\n\tdef event_nameChange(self):\r\n\t\treturn super(winampMainWindow,self).event_nameChange()\r\n\r\n\t__changeItemGestures = (\r\n\t\t\"kb:upArrow\",\r\n\t\t\"kb:downArrow\",\r\n\t\t\"kb:pageUp\",\r\n\t\t\"kb:pageDown\",\r\n\t)\r\n\r\n\tdef initOverlayClass(self):\r\n\t\tfor gesture in self.__changeItemGestures:\r\n\t\t\tself.bindGesture(gesture, \"changeItem\")\r\n", "path": "source/appModules/winamp.py"}]} | 2,414 | 271 |
gh_patches_debug_19139 | rasdani/github-patches | git_diff | mesonbuild__meson-127 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
shutil.ReadError: Unknown archive format (tar.xz)
```
Dowloading enet from http://enet.bespin.org/download/enet-1.3.13.tar.gz
Downloading patch from https://github.com/ignatenkobrain/meson_wrap_example/raw/master/subprojects/enet-meson.tar.xz
Traceback (most recent call last):
File "/home/brain/git/meson/meson.py", line 188, in run
app.generate()
File "/home/brain/git/meson/meson.py", line 129, in generate
intr.run()
File "/home/brain/git/meson/interpreter.py", line 871, in run
self.evaluate_codeblock(self.ast)
File "/home/brain/git/meson/interpreter.py", line 893, in evaluate_codeblock
raise e
File "/home/brain/git/meson/interpreter.py", line 887, in evaluate_codeblock
self.evaluate_statement(cur)
File "/home/brain/git/meson/interpreter.py", line 951, in evaluate_statement
return self.evaluate_if(cur)
File "/home/brain/git/meson/interpreter.py", line 1757, in evaluate_if
self.evaluate_codeblock(node.elseblock)
File "/home/brain/git/meson/interpreter.py", line 893, in evaluate_codeblock
raise e
File "/home/brain/git/meson/interpreter.py", line 887, in evaluate_codeblock
self.evaluate_statement(cur)
File "/home/brain/git/meson/interpreter.py", line 943, in evaluate_statement
return self.assignment(cur)
File "/home/brain/git/meson/interpreter.py", line 1637, in assignment
value = self.evaluate_statement(node.value)
File "/home/brain/git/meson/interpreter.py", line 941, in evaluate_statement
return self.function_call(cur)
File "/home/brain/git/meson/interpreter.py", line 1618, in function_call
return self.funcs[func_name](node, self.flatten(posargs), kwargs)
File "/home/brain/git/meson/interpreter.py", line 60, in wrapped
return f(self, node, args, kwargs)
File "/home/brain/git/meson/interpreter.py", line 52, in wrapped
return f(self, node, args, kwargs)
File "/home/brain/git/meson/interpreter.py", line 1071, in func_subproject
resolved = r.resolve(dirname)
File "/home/brain/git/meson/wrap.py", line 64, in resolve
self.extract_package(p)
File "/home/brain/git/meson/wrap.py", line 128, in extract_package
shutil.unpack_archive(os.path.join(self.cachedir, package.get('patch_filename')), self.subdir_root)
File "/usr/lib64/python3.4/shutil.py", line 953, in unpack_archive
raise ReadError("Unknown archive format '{0}'".format(filename))
shutil.ReadError: Unknown archive format '/home/brain/meson_wrap/subprojects/packagecache/enet-meson.tar.xz'
```
</issue>
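The traceback above comes from Python 3.4 (note the `/usr/lib64/python3.4/shutil.py` path), whose `shutil` module has no built-in handler for `.tar.xz` archives; that format is only registered by default from Python 3.5 onward. A minimal sketch of one way to work around it, assuming the `lzma` module is available (the helper name below is illustrative, not taken from the project):

```python
import shutil
import sys
import tarfile

def _unpack_xz_tarfile(filename, extract_dir):
    # tarfile auto-detects xz compression when the lzma module is present.
    with tarfile.open(filename) as archive:
        archive.extractall(extract_dir)

if sys.version_info < (3, 5):
    try:
        import lzma  # noqa: F401 -- imported only to confirm xz support exists
        shutil.register_unpack_format('xztar', ['.tar.xz', '.txz'],
                                      _unpack_xz_tarfile, [], "xz'ed tar-file")
    except ImportError:
        pass  # without lzma, .tar.xz archives still cannot be unpacked
```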
<code>
[start of wrap.py]
1 # Copyright 2015 The Meson development team
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import mlog
16 import urllib.request, os, hashlib, shutil
17 import subprocess
18
19 class PackageDefinition:
20 def __init__(self, fname):
21 self.values = {}
22 ifile = open(fname)
23 first = ifile.readline().strip()
24
25 if first == '[wrap-file]':
26 self.type = 'file'
27 elif first == '[wrap-git]':
28 self.type = 'git'
29 else:
30 raise RuntimeError('Invalid format of package file')
31 for line in ifile:
32 line = line.strip()
33 if line == '':
34 continue
35 (k, v) = line.split('=', 1)
36 k = k.strip()
37 v = v.strip()
38 self.values[k] = v
39
40 def get(self, key):
41 return self.values[key]
42
43 def has_patch(self):
44 return 'patch_url' in self.values
45
46 class Resolver:
47 def __init__(self, subdir_root):
48 self.subdir_root = subdir_root
49 self.cachedir = os.path.join(self.subdir_root, 'packagecache')
50
51 def resolve(self, packagename):
52 fname = os.path.join(self.subdir_root, packagename + '.wrap')
53 dirname = os.path.join(self.subdir_root, packagename)
54 if not os.path.isfile(fname):
55 if os.path.isdir(dirname):
56 # No wrap file but dir exists -> user put it there manually.
57 return packagename
58 return None
59 p = PackageDefinition(fname)
60 if p.type == 'file':
61 if not os.path.isdir(self.cachedir):
62 os.mkdir(self.cachedir)
63 self.download(p, packagename)
64 self.extract_package(p)
65 elif p.type == 'git':
66 self.get_git(p)
67 else:
68 raise RuntimeError('Unreachable code.')
69 return p.get('directory')
70
71 def get_git(self, p):
72 checkoutdir = os.path.join(self.subdir_root, p.get('directory'))
73 revno = p.get('revision')
74 is_there = os.path.isdir(checkoutdir)
75 if is_there:
76 if revno.lower() == 'head':
77 subprocess.check_call(['git', 'pull'], cwd=checkoutdir)
78 else:
79 if subprocess.call(['git', 'checkout', revno], cwd=checkoutdir) != 0:
80 subprocess.check_call(['git', 'fetch'], cwd=checkoutdir)
81 subprocess.check_call(['git', 'checkout', revno],
82 cwd=checkoutdir)
83 else:
84 subprocess.check_call(['git', 'clone', p.get('url'),
85 p.get('directory')], cwd=self.subdir_root)
86 if revno.lower() != 'head':
87 subprocess.check_call(['git', 'checkout', revno],
88 cwd=checkoutdir)
89
90
91 def get_data(self, url):
92 u = urllib.request.urlopen(url)
93 data = u.read()
94 u.close()
95 h = hashlib.sha256()
96 h.update(data)
97 hashvalue = h.hexdigest()
98 return (data, hashvalue)
99
100 def download(self, p, packagename):
101 ofname = os.path.join(self.cachedir, p.get('source_filename'))
102 if os.path.exists(ofname):
103 mlog.log('Using', mlog.bold(packagename), 'from cache.')
104 return
105 srcurl = p.get('source_url')
106 mlog.log('Dowloading', mlog.bold(packagename), 'from', srcurl)
107 (srcdata, dhash) = self.get_data(srcurl)
108 expected = p.get('source_hash')
109 if dhash != expected:
110 raise RuntimeError('Incorrect hash for source %s:\n %s expected\n %s actual.' % (packagename, expected, dhash))
111 if p.has_patch():
112 purl = p.get('patch_url')
113 mlog.log('Downloading patch from', mlog.bold(purl))
114 (pdata, phash) = self.get_data(purl)
115 expected = p.get('patch_hash')
116 if phash != expected:
117 raise RuntimeError('Incorrect hash for patch %s:\n %s expected\n %s actual' % (packagename, expected, phash))
118 open(os.path.join(self.cachedir, p.get('patch_filename')), 'wb').write(pdata)
119 else:
120 mlog.log('Package does not require patch.')
121 open(ofname, 'wb').write(srcdata)
122
123 def extract_package(self, package):
124 if os.path.isdir(os.path.join(self.subdir_root, package.get('directory'))):
125 return
126 shutil.unpack_archive(os.path.join(self.cachedir, package.get('source_filename')), self.subdir_root)
127 if package.has_patch():
128 shutil.unpack_archive(os.path.join(self.cachedir, package.get('patch_filename')), self.subdir_root)
129
[end of wrap.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wrap.py b/wrap.py
--- a/wrap.py
+++ b/wrap.py
@@ -15,6 +15,7 @@
import mlog
import urllib.request, os, hashlib, shutil
import subprocess
+import sys
class PackageDefinition:
def __init__(self, fname):
@@ -121,6 +122,13 @@
open(ofname, 'wb').write(srcdata)
def extract_package(self, package):
+ if sys.version_info < (3, 5):
+ try:
+ import lzma
+ del lzma
+ shutil.register_unpack_format('xztar', ['.tar.xz', '.txz'], shutil._unpack_tarfile, [], "xz'ed tar-file")
+ except ImportError:
+ pass
if os.path.isdir(os.path.join(self.subdir_root, package.get('directory'))):
return
shutil.unpack_archive(os.path.join(self.cachedir, package.get('source_filename')), self.subdir_root)
| {"golden_diff": "diff --git a/wrap.py b/wrap.py\n--- a/wrap.py\n+++ b/wrap.py\n@@ -15,6 +15,7 @@\n import mlog\n import urllib.request, os, hashlib, shutil\n import subprocess\n+import sys\n \n class PackageDefinition:\n def __init__(self, fname):\n@@ -121,6 +122,13 @@\n open(ofname, 'wb').write(srcdata)\n \n def extract_package(self, package):\n+ if sys.version_info < (3, 5):\n+ try:\n+ import lzma\n+ del lzma\n+ shutil.register_unpack_format('xztar', ['.tar.xz', '.txz'], shutil._unpack_tarfile, [], \"xz'ed tar-file\")\n+ except ImportError:\n+ pass\n if os.path.isdir(os.path.join(self.subdir_root, package.get('directory'))):\n return\n shutil.unpack_archive(os.path.join(self.cachedir, package.get('source_filename')), self.subdir_root)\n", "issue": "shutil.ReadError: Unknown archive format (tar.xz)\n```\nDowloading enet from http://enet.bespin.org/download/enet-1.3.13.tar.gz\nDownloading patch from https://github.com/ignatenkobrain/meson_wrap_example/raw/master/subprojects/enet-meson.tar.xz\nTraceback (most recent call last):\n File \"/home/brain/git/meson/meson.py\", line 188, in run\n app.generate()\n File \"/home/brain/git/meson/meson.py\", line 129, in generate\n intr.run()\n File \"/home/brain/git/meson/interpreter.py\", line 871, in run\n self.evaluate_codeblock(self.ast)\n File \"/home/brain/git/meson/interpreter.py\", line 893, in evaluate_codeblock\n raise e\n File \"/home/brain/git/meson/interpreter.py\", line 887, in evaluate_codeblock\n self.evaluate_statement(cur)\n File \"/home/brain/git/meson/interpreter.py\", line 951, in evaluate_statement\n return self.evaluate_if(cur)\n File \"/home/brain/git/meson/interpreter.py\", line 1757, in evaluate_if\n self.evaluate_codeblock(node.elseblock)\n File \"/home/brain/git/meson/interpreter.py\", line 893, in evaluate_codeblock\n raise e\n File \"/home/brain/git/meson/interpreter.py\", line 887, in evaluate_codeblock\n self.evaluate_statement(cur)\n File \"/home/brain/git/meson/interpreter.py\", line 943, in evaluate_statement\n return self.assignment(cur)\n File \"/home/brain/git/meson/interpreter.py\", line 1637, in assignment\n value = self.evaluate_statement(node.value)\n File \"/home/brain/git/meson/interpreter.py\", line 941, in evaluate_statement\n return self.function_call(cur)\n File \"/home/brain/git/meson/interpreter.py\", line 1618, in function_call\n return self.funcs[func_name](node, self.flatten(posargs), kwargs)\n File \"/home/brain/git/meson/interpreter.py\", line 60, in wrapped\n return f(self, node, args, kwargs)\n File \"/home/brain/git/meson/interpreter.py\", line 52, in wrapped\n return f(self, node, args, kwargs)\n File \"/home/brain/git/meson/interpreter.py\", line 1071, in func_subproject\n resolved = r.resolve(dirname)\n File \"/home/brain/git/meson/wrap.py\", line 64, in resolve\n self.extract_package(p)\n File \"/home/brain/git/meson/wrap.py\", line 128, in extract_package\n shutil.unpack_archive(os.path.join(self.cachedir, package.get('patch_filename')), self.subdir_root)\n File \"/usr/lib64/python3.4/shutil.py\", line 953, in unpack_archive\n raise ReadError(\"Unknown archive format '{0}'\".format(filename))\nshutil.ReadError: Unknown archive format '/home/brain/meson_wrap/subprojects/packagecache/enet-meson.tar.xz'\n```\n\n", "before_files": [{"content": "# Copyright 2015 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# 
http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport mlog\nimport urllib.request, os, hashlib, shutil\nimport subprocess\n\nclass PackageDefinition:\n def __init__(self, fname):\n self.values = {}\n ifile = open(fname)\n first = ifile.readline().strip()\n\n if first == '[wrap-file]':\n self.type = 'file'\n elif first == '[wrap-git]':\n self.type = 'git'\n else:\n raise RuntimeError('Invalid format of package file')\n for line in ifile:\n line = line.strip()\n if line == '':\n continue\n (k, v) = line.split('=', 1)\n k = k.strip()\n v = v.strip()\n self.values[k] = v\n\n def get(self, key):\n return self.values[key]\n\n def has_patch(self):\n return 'patch_url' in self.values\n\nclass Resolver:\n def __init__(self, subdir_root):\n self.subdir_root = subdir_root\n self.cachedir = os.path.join(self.subdir_root, 'packagecache')\n\n def resolve(self, packagename):\n fname = os.path.join(self.subdir_root, packagename + '.wrap')\n dirname = os.path.join(self.subdir_root, packagename)\n if not os.path.isfile(fname):\n if os.path.isdir(dirname):\n # No wrap file but dir exists -> user put it there manually.\n return packagename \n return None\n p = PackageDefinition(fname)\n if p.type == 'file':\n if not os.path.isdir(self.cachedir):\n os.mkdir(self.cachedir)\n self.download(p, packagename)\n self.extract_package(p)\n elif p.type == 'git':\n self.get_git(p)\n else:\n raise RuntimeError('Unreachable code.')\n return p.get('directory')\n\n def get_git(self, p):\n checkoutdir = os.path.join(self.subdir_root, p.get('directory'))\n revno = p.get('revision')\n is_there = os.path.isdir(checkoutdir)\n if is_there:\n if revno.lower() == 'head':\n subprocess.check_call(['git', 'pull'], cwd=checkoutdir)\n else:\n if subprocess.call(['git', 'checkout', revno], cwd=checkoutdir) != 0:\n subprocess.check_call(['git', 'fetch'], cwd=checkoutdir)\n subprocess.check_call(['git', 'checkout', revno],\n cwd=checkoutdir)\n else:\n subprocess.check_call(['git', 'clone', p.get('url'),\n p.get('directory')], cwd=self.subdir_root)\n if revno.lower() != 'head':\n subprocess.check_call(['git', 'checkout', revno],\n cwd=checkoutdir)\n\n\n def get_data(self, url):\n u = urllib.request.urlopen(url)\n data = u.read()\n u.close()\n h = hashlib.sha256()\n h.update(data)\n hashvalue = h.hexdigest()\n return (data, hashvalue)\n\n def download(self, p, packagename):\n ofname = os.path.join(self.cachedir, p.get('source_filename'))\n if os.path.exists(ofname):\n mlog.log('Using', mlog.bold(packagename), 'from cache.')\n return\n srcurl = p.get('source_url')\n mlog.log('Dowloading', mlog.bold(packagename), 'from', srcurl)\n (srcdata, dhash) = self.get_data(srcurl)\n expected = p.get('source_hash')\n if dhash != expected:\n raise RuntimeError('Incorrect hash for source %s:\\n %s expected\\n %s actual.' 
% (packagename, expected, dhash))\n if p.has_patch():\n purl = p.get('patch_url')\n mlog.log('Downloading patch from', mlog.bold(purl))\n (pdata, phash) = self.get_data(purl)\n expected = p.get('patch_hash')\n if phash != expected:\n raise RuntimeError('Incorrect hash for patch %s:\\n %s expected\\n %s actual' % (packagename, expected, phash))\n open(os.path.join(self.cachedir, p.get('patch_filename')), 'wb').write(pdata)\n else:\n mlog.log('Package does not require patch.')\n open(ofname, 'wb').write(srcdata)\n\n def extract_package(self, package):\n if os.path.isdir(os.path.join(self.subdir_root, package.get('directory'))):\n return\n shutil.unpack_archive(os.path.join(self.cachedir, package.get('source_filename')), self.subdir_root)\n if package.has_patch():\n shutil.unpack_archive(os.path.join(self.cachedir, package.get('patch_filename')), self.subdir_root)\n", "path": "wrap.py"}]} | 2,673 | 223 |
gh_patches_debug_16944 | rasdani/github-patches | git_diff | Mailu__Mailu-876 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing wildcard option in alias flask command
Hi guys,
First of all, thank you for making this awesome Mailu project :+1: :100:
I'm using the command line to mass-import some aliases from an old server. There doesn't seem to be an option to toggle wildcard support:
```
Usage: flask mailu alias [OPTIONS] LOCALPART DOMAIN_NAME DESTINATION
Create an alias
Options:
--help Show this message and exit.
```
It would be great if this could be done through the CLI. Thanks again!
</issue>
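With click, which the `manage.py` listing below is built on, a boolean toggle like this is normally exposed as a flag option rather than an extra positional argument. A brief standalone sketch of that pattern, assuming a `-w`/`--wildcard` flag name (the same name the accepted patch further down this record uses):

```python
import click

@click.command()
@click.argument('localpart')
@click.argument('domain_name')
@click.argument('destination')
@click.option('-w', '--wildcard', is_flag=True, default=False,
              help='Treat the alias localpart as a wildcard pattern.')
def alias(localpart, domain_name, destination, wildcard):
    """Create an alias (illustrative sketch, not the real Mailu command)."""
    click.echo('wildcard=%s -> %s@%s: %s' % (wildcard, localpart, domain_name, destination))

if __name__ == '__main__':
    alias()
```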
<code>
[start of core/admin/mailu/manage.py]
1 from mailu import models
2
3 from flask import current_app as app
4 from flask import cli as flask_cli
5
6 import flask
7 import os
8 import socket
9 import uuid
10 import click
11
12
13 db = models.db
14
15
16 @click.group()
17 def mailu(cls=flask_cli.FlaskGroup):
18 """ Mailu command line
19 """
20
21
22 @mailu.command()
23 @flask_cli.with_appcontext
24 def advertise():
25 """ Advertise this server against statistic services.
26 """
27 if os.path.isfile(app.config["INSTANCE_ID_PATH"]):
28 with open(app.config["INSTANCE_ID_PATH"], "r") as handle:
29 instance_id = handle.read()
30 else:
31 instance_id = str(uuid.uuid4())
32 with open(app.config["INSTANCE_ID_PATH"], "w") as handle:
33 handle.write(instance_id)
34 if not app.config["DISABLE_STATISTICS"]:
35 try:
36 socket.gethostbyname(app.config["STATS_ENDPOINT"].format(instance_id))
37 except:
38 pass
39
40
41 @mailu.command()
42 @click.argument('localpart')
43 @click.argument('domain_name')
44 @click.argument('password')
45 @flask_cli.with_appcontext
46 def admin(localpart, domain_name, password):
47 """ Create an admin user
48 """
49 domain = models.Domain.query.get(domain_name)
50 if not domain:
51 domain = models.Domain(name=domain_name)
52 db.session.add(domain)
53 user = models.User(
54 localpart=localpart,
55 domain=domain,
56 global_admin=True
57 )
58 user.set_password(password)
59 db.session.add(user)
60 db.session.commit()
61
62
63 @mailu.command()
64 @click.argument('localpart')
65 @click.argument('domain_name')
66 @click.argument('password')
67 @click.argument('hash_scheme')
68 @flask_cli.with_appcontext
69 def user(localpart, domain_name, password, hash_scheme=None):
70 """ Create a user
71 """
72 if hash_scheme is None:
73 hash_scheme = app.config['PASSWORD_SCHEME']
74 domain = models.Domain.query.get(domain_name)
75 if not domain:
76 domain = models.Domain(name=domain_name)
77 db.session.add(domain)
78 user = models.User(
79 localpart=localpart,
80 domain=domain,
81 global_admin=False
82 )
83 user.set_password(password, hash_scheme=hash_scheme)
84 db.session.add(user)
85 db.session.commit()
86
87
88 @mailu.command()
89 @click.option('-n', '--domain_name')
90 @click.option('-u', '--max_users')
91 @click.option('-a', '--max_aliases')
92 @click.option('-q', '--max_quota_bytes')
93 @flask_cli.with_appcontext
94 def domain(domain_name, max_users=-1, max_aliases=-1, max_quota_bytes=0):
95 domain = models.Domain.query.get(domain_name)
96 if not domain:
97 domain = models.Domain(name=domain_name)
98 db.session.add(domain)
99 db.session.commit()
100
101
102 @mailu.command()
103 @click.argument('localpart')
104 @click.argument('domain_name')
105 @click.argument('password_hash')
106 @click.argument('hash_scheme')
107 @flask_cli.with_appcontext
108 def user_import(localpart, domain_name, password_hash, hash_scheme = None):
109 """ Import a user along with password hash.
110 """
111 if hash_scheme is None:
112 hash_scheme = app.config['PASSWORD_SCHEME']
113 domain = models.Domain.query.get(domain_name)
114 if not domain:
115 domain = models.Domain(name=domain_name)
116 db.session.add(domain)
117 user = models.User(
118 localpart=localpart,
119 domain=domain,
120 global_admin=False
121 )
122 user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)
123 db.session.add(user)
124 db.session.commit()
125
126
127 @mailu.command()
128 @click.option('-v', '--verbose')
129 @click.option('-d', '--delete_objects')
130 @flask_cli.with_appcontext
131 def config_update(verbose=False, delete_objects=False):
132 """sync configuration with data from YAML-formatted stdin"""
133 import yaml
134 import sys
135 new_config = yaml.load(sys.stdin)
136 # print new_config
137 domains = new_config.get('domains', [])
138 tracked_domains = set()
139 for domain_config in domains:
140 if verbose:
141 print(str(domain_config))
142 domain_name = domain_config['name']
143 max_users = domain_config.get('max_users', -1)
144 max_aliases = domain_config.get('max_aliases', -1)
145 max_quota_bytes = domain_config.get('max_quota_bytes', 0)
146 tracked_domains.add(domain_name)
147 domain = models.Domain.query.get(domain_name)
148 if not domain:
149 domain = models.Domain(name=domain_name,
150 max_users=max_users,
151 max_aliases=max_aliases,
152 max_quota_bytes=max_quota_bytes)
153 db.session.add(domain)
154 print("Added " + str(domain_config))
155 else:
156 domain.max_users = max_users
157 domain.max_aliases = max_aliases
158 domain.max_quota_bytes = max_quota_bytes
159 db.session.add(domain)
160 print("Updated " + str(domain_config))
161
162 users = new_config.get('users', [])
163 tracked_users = set()
164 user_optional_params = ('comment', 'quota_bytes', 'global_admin',
165 'enable_imap', 'enable_pop', 'forward_enabled',
166 'forward_destination', 'reply_enabled',
167 'reply_subject', 'reply_body', 'displayed_name',
168 'spam_enabled', 'email', 'spam_threshold')
169 for user_config in users:
170 if verbose:
171 print(str(user_config))
172 localpart = user_config['localpart']
173 domain_name = user_config['domain']
174 password_hash = user_config.get('password_hash', None)
175 hash_scheme = user_config.get('hash_scheme', None)
176 domain = models.Domain.query.get(domain_name)
177 email = '{0}@{1}'.format(localpart, domain_name)
178 optional_params = {}
179 for k in user_optional_params:
180 if k in user_config:
181 optional_params[k] = user_config[k]
182 if not domain:
183 domain = models.Domain(name=domain_name)
184 db.session.add(domain)
185 user = models.User.query.get(email)
186 tracked_users.add(email)
187 tracked_domains.add(domain_name)
188 if not user:
189 user = models.User(
190 localpart=localpart,
191 domain=domain,
192 **optional_params
193 )
194 else:
195 for k in optional_params:
196 setattr(user, k, optional_params[k])
197 user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)
198 db.session.add(user)
199
200 aliases = new_config.get('aliases', [])
201 tracked_aliases = set()
202 for alias_config in aliases:
203 if verbose:
204 print(str(alias_config))
205 localpart = alias_config['localpart']
206 domain_name = alias_config['domain']
207 if type(alias_config['destination']) is str:
208 destination = alias_config['destination'].split(',')
209 else:
210 destination = alias_config['destination']
211 wildcard = alias_config.get('wildcard', False)
212 domain = models.Domain.query.get(domain_name)
213 email = '{0}@{1}'.format(localpart, domain_name)
214 if not domain:
215 domain = models.Domain(name=domain_name)
216 db.session.add(domain)
217 alias = models.Alias.query.get(email)
218 tracked_aliases.add(email)
219 tracked_domains.add(domain_name)
220 if not alias:
221 alias = models.Alias(
222 localpart=localpart,
223 domain=domain,
224 wildcard=wildcard,
225 destination=destination,
226 email=email
227 )
228 else:
229 alias.destination = destination
230 alias.wildcard = wildcard
231 db.session.add(alias)
232
233 db.session.commit()
234
235 managers = new_config.get('managers', [])
236 # tracked_managers=set()
237 for manager_config in managers:
238 if verbose:
239 print(str(manager_config))
240 domain_name = manager_config['domain']
241 user_name = manager_config['user']
242 domain = models.Domain.query.get(domain_name)
243 manageruser = models.User.query.get(user_name + '@' + domain_name)
244 if manageruser not in domain.managers:
245 domain.managers.append(manageruser)
246 db.session.add(domain)
247
248 db.session.commit()
249
250 if delete_objects:
251 for user in db.session.query(models.User).all():
252 if not (user.email in tracked_users):
253 if verbose:
254 print("Deleting user: " + str(user.email))
255 db.session.delete(user)
256 for alias in db.session.query(models.Alias).all():
257 if not (alias.email in tracked_aliases):
258 if verbose:
259 print("Deleting alias: " + str(alias.email))
260 db.session.delete(alias)
261 for domain in db.session.query(models.Domain).all():
262 if not (domain.name in tracked_domains):
263 if verbose:
264 print("Deleting domain: " + str(domain.name))
265 db.session.delete(domain)
266 db.session.commit()
267
268
269 @mailu.command()
270 @click.argument('email')
271 @flask_cli.with_appcontext
272 def user_delete(email):
273 """delete user"""
274 user = models.User.query.get(email)
275 if user:
276 db.session.delete(user)
277 db.session.commit()
278
279
280 @mailu.command()
281 @click.argument('email')
282 @flask_cli.with_appcontext
283 def alias_delete(email):
284 """delete alias"""
285 alias = models.Alias.query.get(email)
286 if alias:
287 db.session.delete(alias)
288 db.session.commit()
289
290
291 @mailu.command()
292 @click.argument('localpart')
293 @click.argument('domain_name')
294 @click.argument('destination')
295 @flask_cli.with_appcontext
296 def alias(localpart, domain_name, destination):
297 """ Create an alias
298 """
299 domain = models.Domain.query.get(domain_name)
300 if not domain:
301 domain = models.Domain(name=domain_name)
302 db.session.add(domain)
303 alias = models.Alias(
304 localpart=localpart,
305 domain=domain,
306 destination=destination.split(','),
307 email="%s@%s" % (localpart, domain_name)
308 )
309 db.session.add(alias)
310 db.session.commit()
311
312
313 @mailu.command()
314 @click.argument('domain_name')
315 @click.argument('max_users')
316 @click.argument('max_aliases')
317 @click.argument('max_quota_bytes')
318 @flask_cli.with_appcontext
319 def setlimits(domain_name, max_users, max_aliases, max_quota_bytes):
320 """ Set domain limits
321 """
322 domain = models.Domain.query.get(domain_name)
323 domain.max_users = max_users
324 domain.max_aliases = max_aliases
325 domain.max_quota_bytes = max_quota_bytes
326 db.session.add(domain)
327 db.session.commit()
328
329
330 @mailu.command()
331 @click.argument('domain_name')
332 @click.argument('user_name')
333 @flask_cli.with_appcontext
334 def setmanager(domain_name, user_name='manager'):
335 """ Make a user manager of a domain
336 """
337 domain = models.Domain.query.get(domain_name)
338 manageruser = models.User.query.get(user_name + '@' + domain_name)
339 domain.managers.append(manageruser)
340 db.session.add(domain)
341 db.session.commit()
342
343
344 if __name__ == '__main__':
345 cli()
346
[end of core/admin/mailu/manage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/admin/mailu/manage.py b/core/admin/mailu/manage.py
--- a/core/admin/mailu/manage.py
+++ b/core/admin/mailu/manage.py
@@ -292,8 +292,9 @@
@click.argument('localpart')
@click.argument('domain_name')
@click.argument('destination')
[email protected]('-w', '--wildcard', is_flag=True)
@flask_cli.with_appcontext
-def alias(localpart, domain_name, destination):
+def alias(localpart, domain_name, destination, wildcard=False):
""" Create an alias
"""
domain = models.Domain.query.get(domain_name)
@@ -303,6 +304,7 @@
alias = models.Alias(
localpart=localpart,
domain=domain,
+ wildcard=wildcard,
destination=destination.split(','),
email="%s@%s" % (localpart, domain_name)
)
| {"golden_diff": "diff --git a/core/admin/mailu/manage.py b/core/admin/mailu/manage.py\n--- a/core/admin/mailu/manage.py\n+++ b/core/admin/mailu/manage.py\n@@ -292,8 +292,9 @@\n @click.argument('localpart')\n @click.argument('domain_name')\n @click.argument('destination')\[email protected]('-w', '--wildcard', is_flag=True)\n @flask_cli.with_appcontext\n-def alias(localpart, domain_name, destination):\n+def alias(localpart, domain_name, destination, wildcard=False):\n \"\"\" Create an alias\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n@@ -303,6 +304,7 @@\n alias = models.Alias(\n localpart=localpart,\n domain=domain,\n+ wildcard=wildcard,\n destination=destination.split(','),\n email=\"%s@%s\" % (localpart, domain_name)\n )\n", "issue": "Missing wildcard option in alias flask command\nHi guys,\r\n\r\nFirst of all, thank you for making this awesome Mailu project :+1: :100: \r\n\r\nI'm using command line to mass import some alias from an old server. There doesn't seem to have an option to toggle wildcard support:\r\n\r\n```\r\nUsage: flask mailu alias [OPTIONS] LOCALPART DOMAIN_NAME DESTINATION\r\n\r\n Create an alias\r\n\r\nOptions:\r\n --help Show this message and exit.\r\n```\r\n\r\nWould be great if it can be done through CLI. Thanks again!\n", "before_files": [{"content": "from mailu import models\n\nfrom flask import current_app as app\nfrom flask import cli as flask_cli\n\nimport flask\nimport os\nimport socket\nimport uuid\nimport click\n\n\ndb = models.db\n\n\[email protected]()\ndef mailu(cls=flask_cli.FlaskGroup):\n \"\"\" Mailu command line\n \"\"\"\n\n\[email protected]()\n@flask_cli.with_appcontext\ndef advertise():\n \"\"\" Advertise this server against statistic services.\n \"\"\"\n if os.path.isfile(app.config[\"INSTANCE_ID_PATH\"]):\n with open(app.config[\"INSTANCE_ID_PATH\"], \"r\") as handle:\n instance_id = handle.read()\n else:\n instance_id = str(uuid.uuid4())\n with open(app.config[\"INSTANCE_ID_PATH\"], \"w\") as handle:\n handle.write(instance_id)\n if not app.config[\"DISABLE_STATISTICS\"]:\n try:\n socket.gethostbyname(app.config[\"STATS_ENDPOINT\"].format(instance_id))\n except:\n pass\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password')\n@flask_cli.with_appcontext\ndef admin(localpart, domain_name, password):\n \"\"\" Create an admin user\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n global_admin=True\n )\n user.set_password(password)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password')\[email protected]('hash_scheme')\n@flask_cli.with_appcontext\ndef user(localpart, domain_name, password, hash_scheme=None):\n \"\"\" Create a user\n \"\"\"\n if hash_scheme is None:\n hash_scheme = app.config['PASSWORD_SCHEME']\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n global_admin=False\n )\n user.set_password(password, hash_scheme=hash_scheme)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('-n', '--domain_name')\[email protected]('-u', '--max_users')\[email protected]('-a', '--max_aliases')\[email protected]('-q', 
'--max_quota_bytes')\n@flask_cli.with_appcontext\ndef domain(domain_name, max_users=-1, max_aliases=-1, max_quota_bytes=0):\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password_hash')\[email protected]('hash_scheme')\n@flask_cli.with_appcontext\ndef user_import(localpart, domain_name, password_hash, hash_scheme = None):\n \"\"\" Import a user along with password hash.\n \"\"\"\n if hash_scheme is None:\n hash_scheme = app.config['PASSWORD_SCHEME']\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n global_admin=False\n )\n user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('-v', '--verbose')\[email protected]('-d', '--delete_objects')\n@flask_cli.with_appcontext\ndef config_update(verbose=False, delete_objects=False):\n \"\"\"sync configuration with data from YAML-formatted stdin\"\"\"\n import yaml\n import sys\n new_config = yaml.load(sys.stdin)\n # print new_config\n domains = new_config.get('domains', [])\n tracked_domains = set()\n for domain_config in domains:\n if verbose:\n print(str(domain_config))\n domain_name = domain_config['name']\n max_users = domain_config.get('max_users', -1)\n max_aliases = domain_config.get('max_aliases', -1)\n max_quota_bytes = domain_config.get('max_quota_bytes', 0)\n tracked_domains.add(domain_name)\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name,\n max_users=max_users,\n max_aliases=max_aliases,\n max_quota_bytes=max_quota_bytes)\n db.session.add(domain)\n print(\"Added \" + str(domain_config))\n else:\n domain.max_users = max_users\n domain.max_aliases = max_aliases\n domain.max_quota_bytes = max_quota_bytes\n db.session.add(domain)\n print(\"Updated \" + str(domain_config))\n\n users = new_config.get('users', [])\n tracked_users = set()\n user_optional_params = ('comment', 'quota_bytes', 'global_admin',\n 'enable_imap', 'enable_pop', 'forward_enabled',\n 'forward_destination', 'reply_enabled',\n 'reply_subject', 'reply_body', 'displayed_name',\n 'spam_enabled', 'email', 'spam_threshold')\n for user_config in users:\n if verbose:\n print(str(user_config))\n localpart = user_config['localpart']\n domain_name = user_config['domain']\n password_hash = user_config.get('password_hash', None)\n hash_scheme = user_config.get('hash_scheme', None)\n domain = models.Domain.query.get(domain_name)\n email = '{0}@{1}'.format(localpart, domain_name)\n optional_params = {}\n for k in user_optional_params:\n if k in user_config:\n optional_params[k] = user_config[k]\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User.query.get(email)\n tracked_users.add(email)\n tracked_domains.add(domain_name)\n if not user:\n user = models.User(\n localpart=localpart,\n domain=domain,\n **optional_params\n )\n else:\n for k in optional_params:\n setattr(user, k, optional_params[k])\n user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)\n db.session.add(user)\n\n aliases = new_config.get('aliases', [])\n tracked_aliases = set()\n for alias_config in aliases:\n if verbose:\n 
print(str(alias_config))\n localpart = alias_config['localpart']\n domain_name = alias_config['domain']\n if type(alias_config['destination']) is str:\n destination = alias_config['destination'].split(',')\n else:\n destination = alias_config['destination']\n wildcard = alias_config.get('wildcard', False)\n domain = models.Domain.query.get(domain_name)\n email = '{0}@{1}'.format(localpart, domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n alias = models.Alias.query.get(email)\n tracked_aliases.add(email)\n tracked_domains.add(domain_name)\n if not alias:\n alias = models.Alias(\n localpart=localpart,\n domain=domain,\n wildcard=wildcard,\n destination=destination,\n email=email\n )\n else:\n alias.destination = destination\n alias.wildcard = wildcard\n db.session.add(alias)\n\n db.session.commit()\n\n managers = new_config.get('managers', [])\n # tracked_managers=set()\n for manager_config in managers:\n if verbose:\n print(str(manager_config))\n domain_name = manager_config['domain']\n user_name = manager_config['user']\n domain = models.Domain.query.get(domain_name)\n manageruser = models.User.query.get(user_name + '@' + domain_name)\n if manageruser not in domain.managers:\n domain.managers.append(manageruser)\n db.session.add(domain)\n\n db.session.commit()\n\n if delete_objects:\n for user in db.session.query(models.User).all():\n if not (user.email in tracked_users):\n if verbose:\n print(\"Deleting user: \" + str(user.email))\n db.session.delete(user)\n for alias in db.session.query(models.Alias).all():\n if not (alias.email in tracked_aliases):\n if verbose:\n print(\"Deleting alias: \" + str(alias.email))\n db.session.delete(alias)\n for domain in db.session.query(models.Domain).all():\n if not (domain.name in tracked_domains):\n if verbose:\n print(\"Deleting domain: \" + str(domain.name))\n db.session.delete(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('email')\n@flask_cli.with_appcontext\ndef user_delete(email):\n \"\"\"delete user\"\"\"\n user = models.User.query.get(email)\n if user:\n db.session.delete(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('email')\n@flask_cli.with_appcontext\ndef alias_delete(email):\n \"\"\"delete alias\"\"\"\n alias = models.Alias.query.get(email)\n if alias:\n db.session.delete(alias)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('destination')\n@flask_cli.with_appcontext\ndef alias(localpart, domain_name, destination):\n \"\"\" Create an alias\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n alias = models.Alias(\n localpart=localpart,\n domain=domain,\n destination=destination.split(','),\n email=\"%s@%s\" % (localpart, domain_name)\n )\n db.session.add(alias)\n db.session.commit()\n\n\[email protected]()\[email protected]('domain_name')\[email protected]('max_users')\[email protected]('max_aliases')\[email protected]('max_quota_bytes')\n@flask_cli.with_appcontext\ndef setlimits(domain_name, max_users, max_aliases, max_quota_bytes):\n \"\"\" Set domain limits\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n domain.max_users = max_users\n domain.max_aliases = max_aliases\n domain.max_quota_bytes = max_quota_bytes\n db.session.add(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('domain_name')\[email 
protected]('user_name')\n@flask_cli.with_appcontext\ndef setmanager(domain_name, user_name='manager'):\n \"\"\" Make a user manager of a domain\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n manageruser = models.User.query.get(user_name + '@' + domain_name)\n domain.managers.append(manageruser)\n db.session.add(domain)\n db.session.commit()\n\n\nif __name__ == '__main__':\n cli()\n", "path": "core/admin/mailu/manage.py"}]} | 3,923 | 200 |
gh_patches_debug_4798 | rasdani/github-patches | git_diff | pytorch__vision-2793 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The ASPP layer has a hard-coded value that only works if the atrous_rates param is of length 3
## 🐛 Bug
The ASPP layer has a hard-coded value that only works if the list of atrous rates provided is of length 3. The hard-coded value in question is the `5` on line 83 here:
https://github.com/pytorch/vision/blob/master/torchvision/models/segmentation/deeplabv3.py#L82-L86
## To Reproduce
```python3
import torch
import torchvision as tv

aspp = tv.models.segmentation.deeplabv3.ASPP(1, [12, 24])  # DeepLab uses [12, 24, 36], which works
aspp.eval()
with torch.no_grad():
out = aspp(torch.empty(1, 1, 256, 256))
```
Results in
```
RuntimeError: Given groups=1, weight of size [256, 1280, 1, 1], expected input[1, 1024, 256, 256] to have 1280 channels, but got 1024 channels instead
```
## Expected behavior
It should work with any number of atrous rates. This can be fixed simply by replacing
```python3
nn.Conv2d(5 * out_channels, out_channels, 1, bias=False)
```
with
```python3
nn.Conv2d(len(self.convs) * out_channels, out_channels, 1, bias=False)
```
## Environment
```
PyTorch version: 1.6.0
Is debug build: False
CUDA used to build PyTorch: 10.2
ROCM used to build PyTorch: N/A
OS: Ubuntu 20.04.1 LTS (x86_64)
GCC version: (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0
Clang version: Could not collect
CMake version: version 3.18.2
Python version: 3.7 (64-bit runtime)
Is CUDA available: True
CUDA runtime version: Could not collect
GPU models and configuration: GPU 0: GeForce GTX 1060 with Max-Q Design
Nvidia driver version: 450.66
cuDNN version: /usr/lib/x86_64-linux-gnu/libcudnn.so.7.6.1
HIP runtime version: N/A
MIOpen runtime version: N/A
Versions of relevant libraries:
[pip3] numpy==1.19.1
[pip3] torch==1.6.0
[pip3] torchvision==0.7.0
[conda] blas 1.0 mkl
[conda] cudatoolkit 10.2.89 hfd86e86_1
[conda] mkl 2020.2 256
[conda] mkl-service 2.3.0 py37he904b0f_0
[conda] mkl_fft 1.1.0 py37h23d657b_0
[conda] mkl_random 1.1.1 py37h0573a6f_0
[conda] numpy 1.19.1 py37hbc911f0_0
[conda] numpy-base 1.19.1 py37hfa32c7d_0
[conda] torch 1.6.0 pypi_0 pypi
[conda] torchvision 0.7.0 pypi_0 pypi
```
cc @vfdev-5
</issue>
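A quick way to confirm the behaviour described above is to feed ASPP rate lists of different lengths; with the suggested `len(self.convs)` change applied, every case below should yield a 256-channel map of unchanged spatial size (the rate lists are arbitrary examples):

```python
import torch
from torchvision.models.segmentation.deeplabv3 import ASPP

for rates in ([12, 24], [12, 24, 36], [6, 12, 18, 24]):
    aspp = ASPP(1, rates)  # 1 input channel, default out_channels=256
    aspp.eval()
    with torch.no_grad():
        out = aspp(torch.empty(1, 1, 64, 64))
    # branches: one 1x1 conv + one conv per rate + global pooling, concatenated then projected
    assert out.shape == (1, 256, 64, 64), out.shape
```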
<code>
[start of torchvision/models/segmentation/deeplabv3.py]
1 import torch
2 from torch import nn
3 from torch.nn import functional as F
4
5 from ._utils import _SimpleSegmentationModel
6
7
8 __all__ = ["DeepLabV3"]
9
10
11 class DeepLabV3(_SimpleSegmentationModel):
12 """
13 Implements DeepLabV3 model from
14 `"Rethinking Atrous Convolution for Semantic Image Segmentation"
15 <https://arxiv.org/abs/1706.05587>`_.
16
17 Arguments:
18 backbone (nn.Module): the network used to compute the features for the model.
19 The backbone should return an OrderedDict[Tensor], with the key being
20 "out" for the last feature map used, and "aux" if an auxiliary classifier
21 is used.
22 classifier (nn.Module): module that takes the "out" element returned from
23 the backbone and returns a dense prediction.
24 aux_classifier (nn.Module, optional): auxiliary classifier used during training
25 """
26 pass
27
28
29 class DeepLabHead(nn.Sequential):
30 def __init__(self, in_channels, num_classes):
31 super(DeepLabHead, self).__init__(
32 ASPP(in_channels, [12, 24, 36]),
33 nn.Conv2d(256, 256, 3, padding=1, bias=False),
34 nn.BatchNorm2d(256),
35 nn.ReLU(),
36 nn.Conv2d(256, num_classes, 1)
37 )
38
39
40 class ASPPConv(nn.Sequential):
41 def __init__(self, in_channels, out_channels, dilation):
42 modules = [
43 nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),
44 nn.BatchNorm2d(out_channels),
45 nn.ReLU()
46 ]
47 super(ASPPConv, self).__init__(*modules)
48
49
50 class ASPPPooling(nn.Sequential):
51 def __init__(self, in_channels, out_channels):
52 super(ASPPPooling, self).__init__(
53 nn.AdaptiveAvgPool2d(1),
54 nn.Conv2d(in_channels, out_channels, 1, bias=False),
55 nn.BatchNorm2d(out_channels),
56 nn.ReLU())
57
58 def forward(self, x):
59 size = x.shape[-2:]
60 for mod in self:
61 x = mod(x)
62 return F.interpolate(x, size=size, mode='bilinear', align_corners=False)
63
64
65 class ASPP(nn.Module):
66 def __init__(self, in_channels, atrous_rates, out_channels=256):
67 super(ASPP, self).__init__()
68 modules = []
69 modules.append(nn.Sequential(
70 nn.Conv2d(in_channels, out_channels, 1, bias=False),
71 nn.BatchNorm2d(out_channels),
72 nn.ReLU()))
73
74 rates = tuple(atrous_rates)
75 for rate in rates:
76 modules.append(ASPPConv(in_channels, out_channels, rate))
77
78 modules.append(ASPPPooling(in_channels, out_channels))
79
80 self.convs = nn.ModuleList(modules)
81
82 self.project = nn.Sequential(
83 nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
84 nn.BatchNorm2d(out_channels),
85 nn.ReLU(),
86 nn.Dropout(0.5))
87
88 def forward(self, x):
89 res = []
90 for conv in self.convs:
91 res.append(conv(x))
92 res = torch.cat(res, dim=1)
93 return self.project(res)
94
[end of torchvision/models/segmentation/deeplabv3.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchvision/models/segmentation/deeplabv3.py b/torchvision/models/segmentation/deeplabv3.py
--- a/torchvision/models/segmentation/deeplabv3.py
+++ b/torchvision/models/segmentation/deeplabv3.py
@@ -80,7 +80,7 @@
self.convs = nn.ModuleList(modules)
self.project = nn.Sequential(
- nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
+ nn.Conv2d(len(self.convs) * out_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(),
nn.Dropout(0.5))
| {"golden_diff": "diff --git a/torchvision/models/segmentation/deeplabv3.py b/torchvision/models/segmentation/deeplabv3.py\n--- a/torchvision/models/segmentation/deeplabv3.py\n+++ b/torchvision/models/segmentation/deeplabv3.py\n@@ -80,7 +80,7 @@\n self.convs = nn.ModuleList(modules)\n \n self.project = nn.Sequential(\n- nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),\n+ nn.Conv2d(len(self.convs) * out_channels, out_channels, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(),\n nn.Dropout(0.5))\n", "issue": "The ASPP layer has a hard-coded value that only works if the atrous_rates param is of length 3\n## \ud83d\udc1b Bug\r\n\r\nThe ASPP layer has a hard-coded value that only works if the list of atrous rates provided is of length 3. The hard-coded value in question is the `5` on line 83 here:\r\nhttps://github.com/pytorch/vision/blob/master/torchvision/models/segmentation/deeplabv3.py#L82-L86\r\n\r\n## To Reproduce\r\n\r\n```python3\r\naspp = tv.models.segmentation.deeplabv3.ASPP(1, [12, 24]) # DeepLab uses [12, 24, 36], which works\r\n\r\naspp.eval()\r\nwith torch.no_grad():\r\n out = aspp(torch.empty(1, 1, 256, 256))\r\n```\r\nResults in\r\n```\r\nRuntimeError: Given groups=1, weight of size [256, 1280, 1, 1], expected input[1, 1024, 256, 256] to have 1280 channels, but got 1024 channels instead\r\n```\r\n\r\n## Expected behavior\r\n\r\nIt should work with different number of atrous rates. This can be fixed simply by replacing\r\n```python3\r\nnn.Conv2d(5 * out_channels, out_channels, 1, bias=False)\r\n```\r\nwith\r\n\r\n```python3\r\nnn.Conv2d(len(self.convs) * out_channels, out_channels, 1, bias=False)\r\n```\r\n\r\n## Environment\r\n\r\n```\r\nPyTorch version: 1.6.0\r\nIs debug build: False\r\nCUDA used to build PyTorch: 10.2\r\nROCM used to build PyTorch: N/A\r\n\r\nOS: Ubuntu 20.04.1 LTS (x86_64)\r\nGCC version: (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0\r\nClang version: Could not collect\r\nCMake version: version 3.18.2\r\n\r\nPython version: 3.7 (64-bit runtime)\r\nIs CUDA available: True\r\nCUDA runtime version: Could not collect\r\nGPU models and configuration: GPU 0: GeForce GTX 1060 with Max-Q Design\r\nNvidia driver version: 450.66\r\ncuDNN version: /usr/lib/x86_64-linux-gnu/libcudnn.so.7.6.1\r\nHIP runtime version: N/A\r\nMIOpen runtime version: N/A\r\n\r\nVersions of relevant libraries:\r\n[pip3] numpy==1.19.1\r\n[pip3] torch==1.6.0\r\n[pip3] torchvision==0.7.0\r\n[conda] blas 1.0 mkl \r\n[conda] cudatoolkit 10.2.89 hfd86e86_1 \r\n[conda] mkl 2020.2 256 \r\n[conda] mkl-service 2.3.0 py37he904b0f_0 \r\n[conda] mkl_fft 1.1.0 py37h23d657b_0 \r\n[conda] mkl_random 1.1.1 py37h0573a6f_0 \r\n[conda] numpy 1.19.1 py37hbc911f0_0 \r\n[conda] numpy-base 1.19.1 py37hfa32c7d_0 \r\n[conda] torch 1.6.0 pypi_0 pypi\r\n[conda] torchvision 0.7.0 pypi_0 pypi\r\n```\r\n\n\ncc @vfdev-5\n", "before_files": [{"content": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom ._utils import _SimpleSegmentationModel\n\n\n__all__ = [\"DeepLabV3\"]\n\n\nclass DeepLabV3(_SimpleSegmentationModel):\n \"\"\"\n Implements DeepLabV3 model from\n `\"Rethinking Atrous Convolution for Semantic Image Segmentation\"\n <https://arxiv.org/abs/1706.05587>`_.\n\n Arguments:\n backbone (nn.Module): the network used to compute the features for the model.\n The backbone should return an OrderedDict[Tensor], with the key being\n \"out\" for the last feature map used, and \"aux\" if an auxiliary classifier\n is used.\n classifier (nn.Module): module that takes the \"out\" 
element returned from\n the backbone and returns a dense prediction.\n aux_classifier (nn.Module, optional): auxiliary classifier used during training\n \"\"\"\n pass\n\n\nclass DeepLabHead(nn.Sequential):\n def __init__(self, in_channels, num_classes):\n super(DeepLabHead, self).__init__(\n ASPP(in_channels, [12, 24, 36]),\n nn.Conv2d(256, 256, 3, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n nn.Conv2d(256, num_classes, 1)\n )\n\n\nclass ASPPConv(nn.Sequential):\n def __init__(self, in_channels, out_channels, dilation):\n modules = [\n nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU()\n ]\n super(ASPPConv, self).__init__(*modules)\n\n\nclass ASPPPooling(nn.Sequential):\n def __init__(self, in_channels, out_channels):\n super(ASPPPooling, self).__init__(\n nn.AdaptiveAvgPool2d(1),\n nn.Conv2d(in_channels, out_channels, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU())\n\n def forward(self, x):\n size = x.shape[-2:]\n for mod in self:\n x = mod(x)\n return F.interpolate(x, size=size, mode='bilinear', align_corners=False)\n\n\nclass ASPP(nn.Module):\n def __init__(self, in_channels, atrous_rates, out_channels=256):\n super(ASPP, self).__init__()\n modules = []\n modules.append(nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU()))\n\n rates = tuple(atrous_rates)\n for rate in rates:\n modules.append(ASPPConv(in_channels, out_channels, rate))\n\n modules.append(ASPPPooling(in_channels, out_channels))\n\n self.convs = nn.ModuleList(modules)\n\n self.project = nn.Sequential(\n nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(),\n nn.Dropout(0.5))\n\n def forward(self, x):\n res = []\n for conv in self.convs:\n res.append(conv(x))\n res = torch.cat(res, dim=1)\n return self.project(res)\n", "path": "torchvision/models/segmentation/deeplabv3.py"}]} | 2,328 | 164 |
gh_patches_debug_14203 | rasdani/github-patches | git_diff | getsentry__sentry-python-2476 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FastApiIntegration hides request handler function name
### How do you use Sentry?
Sentry Saas (sentry.io)
### Version
1.31.0
### Steps to Reproduce
Use the Python Sentry SDK in conjunction with FastAPI and the New Relic Python agent.
View transactions in the NewRelic UI.
### Expected Result
The NewRelic transaction name should show the function name handling the FastAPI route.
### Actual Result
New Relic services representing our new fastapi services are seeing significant time spent in a transaction named `sentry_sdk.integrations.fastapi:patch_get_request_handler.<locals>._sentry_get_request_handler.<locals>._sentry_call`.

The source code for `fastapi:patch_get_request_handler` wraps non-async request handlers in a local function called `_sentry_call`. Every such wrapper shares the same `__qualname__`, which is the name responsible for the significant transaction time shown above. The result is that every route handler gets the same name, and it appears in New Relic that the service has only one transaction type.
Sentry SDK integrations wrap functions all the time to create Sentry scope, and yet we never have this problem. It appears the issue comes from a simple oversight when writing the FastAPI integration. Other integrations use `sentry_sdk._functools:wraps` to wrap a function with Sentry-specific behavior, yet ensure the name, qualname, etc. are retained from the wrapped function.
</issue>
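The `__qualname__` effect described above is easy to reproduce outside Sentry. The sketch below uses stand-in names and the stdlib `functools.wraps` (the SDK's `sentry_sdk._functools.wraps`, used in the patch later in this record, plays the same role):

```python
import functools

def patch_plain(func):
    def _sentry_call(*args, **kwargs):
        return func(*args, **kwargs)
    return _sentry_call

def patch_wrapped(func):
    @functools.wraps(func)  # copies __name__, __qualname__, __module__, etc.
    def _sentry_call(*args, **kwargs):
        return func(*args, **kwargs)
    return _sentry_call

def get_user():  # stand-in for a FastAPI route handler
    return {"user": "example"}

print(patch_plain(get_user).__qualname__)    # patch_plain.<locals>._sentry_call
print(patch_wrapped(get_user).__qualname__)  # get_user
```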
<code>
[start of sentry_sdk/integrations/fastapi.py]
1 import asyncio
2 from copy import deepcopy
3
4 from sentry_sdk._types import TYPE_CHECKING
5 from sentry_sdk.hub import Hub, _should_send_default_pii
6 from sentry_sdk.integrations import DidNotEnable
7 from sentry_sdk.tracing import SOURCE_FOR_STYLE, TRANSACTION_SOURCE_ROUTE
8 from sentry_sdk.utils import transaction_from_function, logger
9
10 if TYPE_CHECKING:
11 from typing import Any, Callable, Dict
12 from sentry_sdk.scope import Scope
13
14 try:
15 from sentry_sdk.integrations.starlette import (
16 StarletteIntegration,
17 StarletteRequestExtractor,
18 )
19 except DidNotEnable:
20 raise DidNotEnable("Starlette is not installed")
21
22 try:
23 import fastapi # type: ignore
24 except ImportError:
25 raise DidNotEnable("FastAPI is not installed")
26
27
28 _DEFAULT_TRANSACTION_NAME = "generic FastAPI request"
29
30
31 class FastApiIntegration(StarletteIntegration):
32 identifier = "fastapi"
33
34 @staticmethod
35 def setup_once():
36 # type: () -> None
37 patch_get_request_handler()
38
39
40 def _set_transaction_name_and_source(scope, transaction_style, request):
41 # type: (Scope, str, Any) -> None
42 name = ""
43
44 if transaction_style == "endpoint":
45 endpoint = request.scope.get("endpoint")
46 if endpoint:
47 name = transaction_from_function(endpoint) or ""
48
49 elif transaction_style == "url":
50 route = request.scope.get("route")
51 if route:
52 path = getattr(route, "path", None)
53 if path is not None:
54 name = path
55
56 if not name:
57 name = _DEFAULT_TRANSACTION_NAME
58 source = TRANSACTION_SOURCE_ROUTE
59 else:
60 source = SOURCE_FOR_STYLE[transaction_style]
61
62 scope.set_transaction_name(name, source=source)
63 logger.debug(
64 "[FastAPI] Set transaction name and source on scope: %s / %s", name, source
65 )
66
67
68 def patch_get_request_handler():
69 # type: () -> None
70 old_get_request_handler = fastapi.routing.get_request_handler
71
72 def _sentry_get_request_handler(*args, **kwargs):
73 # type: (*Any, **Any) -> Any
74 dependant = kwargs.get("dependant")
75 if (
76 dependant
77 and dependant.call is not None
78 and not asyncio.iscoroutinefunction(dependant.call)
79 ):
80 old_call = dependant.call
81
82 def _sentry_call(*args, **kwargs):
83 # type: (*Any, **Any) -> Any
84 hub = Hub.current
85 with hub.configure_scope() as sentry_scope:
86 if sentry_scope.profile is not None:
87 sentry_scope.profile.update_active_thread_id()
88 return old_call(*args, **kwargs)
89
90 dependant.call = _sentry_call
91
92 old_app = old_get_request_handler(*args, **kwargs)
93
94 async def _sentry_app(*args, **kwargs):
95 # type: (*Any, **Any) -> Any
96 hub = Hub.current
97 integration = hub.get_integration(FastApiIntegration)
98 if integration is None:
99 return await old_app(*args, **kwargs)
100
101 with hub.configure_scope() as sentry_scope:
102 request = args[0]
103
104 _set_transaction_name_and_source(
105 sentry_scope, integration.transaction_style, request
106 )
107
108 extractor = StarletteRequestExtractor(request)
109 info = await extractor.extract_request_info()
110
111 def _make_request_event_processor(req, integration):
112 # type: (Any, Any) -> Callable[[Dict[str, Any], Dict[str, Any]], Dict[str, Any]]
113 def event_processor(event, hint):
114 # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
115
116 # Extract information from request
117 request_info = event.get("request", {})
118 if info:
119 if "cookies" in info and _should_send_default_pii():
120 request_info["cookies"] = info["cookies"]
121 if "data" in info:
122 request_info["data"] = info["data"]
123 event["request"] = deepcopy(request_info)
124
125 return event
126
127 return event_processor
128
129 sentry_scope._name = FastApiIntegration.identifier
130 sentry_scope.add_event_processor(
131 _make_request_event_processor(request, integration)
132 )
133
134 return await old_app(*args, **kwargs)
135
136 return _sentry_app
137
138 fastapi.routing.get_request_handler = _sentry_get_request_handler
139
[end of sentry_sdk/integrations/fastapi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sentry_sdk/integrations/fastapi.py b/sentry_sdk/integrations/fastapi.py
--- a/sentry_sdk/integrations/fastapi.py
+++ b/sentry_sdk/integrations/fastapi.py
@@ -1,6 +1,7 @@
import asyncio
from copy import deepcopy
+from sentry_sdk._functools import wraps
from sentry_sdk._types import TYPE_CHECKING
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.integrations import DidNotEnable
@@ -79,6 +80,7 @@
):
old_call = dependant.call
+ @wraps(old_call)
def _sentry_call(*args, **kwargs):
# type: (*Any, **Any) -> Any
hub = Hub.current
| {"golden_diff": "diff --git a/sentry_sdk/integrations/fastapi.py b/sentry_sdk/integrations/fastapi.py\n--- a/sentry_sdk/integrations/fastapi.py\n+++ b/sentry_sdk/integrations/fastapi.py\n@@ -1,6 +1,7 @@\n import asyncio\n from copy import deepcopy\n \n+from sentry_sdk._functools import wraps\n from sentry_sdk._types import TYPE_CHECKING\n from sentry_sdk.hub import Hub, _should_send_default_pii\n from sentry_sdk.integrations import DidNotEnable\n@@ -79,6 +80,7 @@\n ):\n old_call = dependant.call\n \n+ @wraps(old_call)\n def _sentry_call(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n hub = Hub.current\n", "issue": "FastApiIntegration hides request handler function name\n### How do you use Sentry?\n\nSentry Saas (sentry.io)\n\n### Version\n\n1.31.0\n\n### Steps to Reproduce\n\nUse the python sentry sdk in conjunction with FastAPI and the NewRelic python agent. \r\nView transactions in the NewRelic UI. \n\n### Expected Result\n\nThe NewRelic transaction name should show the function name handling the FastAPI route. \n\n### Actual Result\n\nNew Relic services representing our new fastapi services are seeing significant time spent in a transaction named `sentry_sdk.integrations.fastapi:patch_get_request_handler.<locals>._sentry_get_request_handler.<locals>._sentry_call`.\r\n\r\n\r\n\r\nThe source code for `fastspi:patch_get_request_handler` wraps non-async request handlers in a local function called `_sentry_call`. This local function shares the same `__qualname__` responsible for the significant transaction time. The result is every route handler has the same name and it appears in New Relic that the service has one transaction type. \r\n\r\nSentry sdk integrations wrap functions all the time to create sentry scope and yet we never have this problem. It appears the issue comes from a simple oversight when writing the fastapi integration. Other integrations use `sentry_sdk._functools:wraps` to wrap a function with sentry specific behavior, yet ensure the name, qualname, etc. are retrained from the wrapped function. 
\n", "before_files": [{"content": "import asyncio\nfrom copy import deepcopy\n\nfrom sentry_sdk._types import TYPE_CHECKING\nfrom sentry_sdk.hub import Hub, _should_send_default_pii\nfrom sentry_sdk.integrations import DidNotEnable\nfrom sentry_sdk.tracing import SOURCE_FOR_STYLE, TRANSACTION_SOURCE_ROUTE\nfrom sentry_sdk.utils import transaction_from_function, logger\n\nif TYPE_CHECKING:\n from typing import Any, Callable, Dict\n from sentry_sdk.scope import Scope\n\ntry:\n from sentry_sdk.integrations.starlette import (\n StarletteIntegration,\n StarletteRequestExtractor,\n )\nexcept DidNotEnable:\n raise DidNotEnable(\"Starlette is not installed\")\n\ntry:\n import fastapi # type: ignore\nexcept ImportError:\n raise DidNotEnable(\"FastAPI is not installed\")\n\n\n_DEFAULT_TRANSACTION_NAME = \"generic FastAPI request\"\n\n\nclass FastApiIntegration(StarletteIntegration):\n identifier = \"fastapi\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n patch_get_request_handler()\n\n\ndef _set_transaction_name_and_source(scope, transaction_style, request):\n # type: (Scope, str, Any) -> None\n name = \"\"\n\n if transaction_style == \"endpoint\":\n endpoint = request.scope.get(\"endpoint\")\n if endpoint:\n name = transaction_from_function(endpoint) or \"\"\n\n elif transaction_style == \"url\":\n route = request.scope.get(\"route\")\n if route:\n path = getattr(route, \"path\", None)\n if path is not None:\n name = path\n\n if not name:\n name = _DEFAULT_TRANSACTION_NAME\n source = TRANSACTION_SOURCE_ROUTE\n else:\n source = SOURCE_FOR_STYLE[transaction_style]\n\n scope.set_transaction_name(name, source=source)\n logger.debug(\n \"[FastAPI] Set transaction name and source on scope: %s / %s\", name, source\n )\n\n\ndef patch_get_request_handler():\n # type: () -> None\n old_get_request_handler = fastapi.routing.get_request_handler\n\n def _sentry_get_request_handler(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n dependant = kwargs.get(\"dependant\")\n if (\n dependant\n and dependant.call is not None\n and not asyncio.iscoroutinefunction(dependant.call)\n ):\n old_call = dependant.call\n\n def _sentry_call(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n hub = Hub.current\n with hub.configure_scope() as sentry_scope:\n if sentry_scope.profile is not None:\n sentry_scope.profile.update_active_thread_id()\n return old_call(*args, **kwargs)\n\n dependant.call = _sentry_call\n\n old_app = old_get_request_handler(*args, **kwargs)\n\n async def _sentry_app(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(FastApiIntegration)\n if integration is None:\n return await old_app(*args, **kwargs)\n\n with hub.configure_scope() as sentry_scope:\n request = args[0]\n\n _set_transaction_name_and_source(\n sentry_scope, integration.transaction_style, request\n )\n\n extractor = StarletteRequestExtractor(request)\n info = await extractor.extract_request_info()\n\n def _make_request_event_processor(req, integration):\n # type: (Any, Any) -> Callable[[Dict[str, Any], Dict[str, Any]], Dict[str, Any]]\n def event_processor(event, hint):\n # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]\n\n # Extract information from request\n request_info = event.get(\"request\", {})\n if info:\n if \"cookies\" in info and _should_send_default_pii():\n request_info[\"cookies\"] = info[\"cookies\"]\n if \"data\" in info:\n request_info[\"data\"] = info[\"data\"]\n event[\"request\"] = deepcopy(request_info)\n\n return event\n\n return 
event_processor\n\n sentry_scope._name = FastApiIntegration.identifier\n sentry_scope.add_event_processor(\n _make_request_event_processor(request, integration)\n )\n\n return await old_app(*args, **kwargs)\n\n return _sentry_app\n\n fastapi.routing.get_request_handler = _sentry_get_request_handler\n", "path": "sentry_sdk/integrations/fastapi.py"}]} | 2,178 | 178 |
gh_patches_debug_18754 | rasdani/github-patches | git_diff | streamlink__streamlink-4467 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove `streamlink.plugin.api.utils.itertags`
[`streamlink.plugin.api.utils.itertags`](https://github.com/streamlink/streamlink/blob/3.2.0/src/streamlink/plugin/api/utils.py#L16-L28) has become obsolete ever since `lxml` was added as a dependency to Streamlink for parsing HTML.
`itertags` is a hacky implementation via regexes, which is not only slow, but it's also impossible to correctly parse HTML nodes with regular expressions, so it shouldn't be used when better and much faster solutions are available. It also always requires unescaping tag values, which is annoying.
We've already updated and replaced lots of plugins which were previously using it, but there are still some left:
```
$ GIT_PAGER=cat git grep -F 'from streamlink.plugin.api.utils import' a1ce471f
a1ce471f:src/streamlink/plugins/cdnbg.py:from streamlink.plugin.api.utils import itertags
a1ce471f:src/streamlink/plugins/facebook.py:from streamlink.plugin.api.utils import itertags
a1ce471f:src/streamlink/plugins/funimationnow.py:from streamlink.plugin.api.utils import itertags
a1ce471f:src/streamlink/plugins/senategov.py:from streamlink.plugin.api.utils import itertags
a1ce471f:src/streamlink/plugins/vrtbe.py:from streamlink.plugin.api.utils import itertags
a1ce471f:tests/test_plugin_utils.py:from streamlink.plugin.api.utils import itertags
```
- [x] cdnbg
- [x] facebook
- [x] funimationnow
- [x] senategov
- [x] vrtbe
Once every last plugin has been updated, the entire `streamlink.plugin.api.utils` module can be removed, as it only contains the `itertags` function and some other useless export aliases which are not even used anymore in Streamlink's codebase.
If we care about plugin-API stability (something which has never been discussed), removing this would be considered a breaking change. Since we've just dropped py36, that's something which could be included in the 4.0.0 release.
</issue>
<code>
[start of src/streamlink/plugin/api/utils.py]
1 """Useful wrappers and other tools."""
2 import re
3 from collections import namedtuple
4
5 from streamlink.utils.parse import parse_json, parse_qsd as parse_query, parse_xml
6
7 __all__ = ["parse_json", "parse_xml", "parse_query"]
8
9
10 tag_re = re.compile(r'''(?=<(?P<tag>[a-zA-Z]+)(?P<attr>.*?)(?P<end>/)?>(?:(?P<inner>.*?)</\s*(?P=tag)\s*>)?)''',
11 re.MULTILINE | re.DOTALL)
12 attr_re = re.compile(r'''\s*(?P<key>[\w-]+)\s*(?:=\s*(?P<quote>["']?)(?P<value>.*?)(?P=quote)\s*)?''')
13 Tag = namedtuple("Tag", "tag attributes text")
14
15
16 def itertags(html, tag):
17 """
18 Brute force regex based HTML tag parser. This is a rough-and-ready searcher to find HTML tags when
19 standards compliance is not required. Will find tags that are commented out, or inside script tag etc.
20
21 :param html: HTML page
22 :param tag: tag name to find
23 :return: generator with Tags
24 """
25 for match in tag_re.finditer(html):
26 if match.group("tag") == tag:
27 attrs = {a.group("key").lower(): a.group("value") for a in attr_re.finditer(match.group("attr"))}
28 yield Tag(match.group("tag"), attrs, match.group("inner"))
29
[end of src/streamlink/plugin/api/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugin/api/utils.py b/src/streamlink/plugin/api/utils.py
deleted file mode 100644
--- a/src/streamlink/plugin/api/utils.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""Useful wrappers and other tools."""
-import re
-from collections import namedtuple
-
-from streamlink.utils.parse import parse_json, parse_qsd as parse_query, parse_xml
-
-__all__ = ["parse_json", "parse_xml", "parse_query"]
-
-
-tag_re = re.compile(r'''(?=<(?P<tag>[a-zA-Z]+)(?P<attr>.*?)(?P<end>/)?>(?:(?P<inner>.*?)</\s*(?P=tag)\s*>)?)''',
- re.MULTILINE | re.DOTALL)
-attr_re = re.compile(r'''\s*(?P<key>[\w-]+)\s*(?:=\s*(?P<quote>["']?)(?P<value>.*?)(?P=quote)\s*)?''')
-Tag = namedtuple("Tag", "tag attributes text")
-
-
-def itertags(html, tag):
- """
- Brute force regex based HTML tag parser. This is a rough-and-ready searcher to find HTML tags when
- standards compliance is not required. Will find tags that are commented out, or inside script tag etc.
-
- :param html: HTML page
- :param tag: tag name to find
- :return: generator with Tags
- """
- for match in tag_re.finditer(html):
- if match.group("tag") == tag:
- attrs = {a.group("key").lower(): a.group("value") for a in attr_re.finditer(match.group("attr"))}
- yield Tag(match.group("tag"), attrs, match.group("inner"))
| {"golden_diff": "diff --git a/src/streamlink/plugin/api/utils.py b/src/streamlink/plugin/api/utils.py\ndeleted file mode 100644\n--- a/src/streamlink/plugin/api/utils.py\n+++ /dev/null\n@@ -1,28 +0,0 @@\n-\"\"\"Useful wrappers and other tools.\"\"\"\n-import re\n-from collections import namedtuple\n-\n-from streamlink.utils.parse import parse_json, parse_qsd as parse_query, parse_xml\n-\n-__all__ = [\"parse_json\", \"parse_xml\", \"parse_query\"]\n-\n-\n-tag_re = re.compile(r'''(?=<(?P<tag>[a-zA-Z]+)(?P<attr>.*?)(?P<end>/)?>(?:(?P<inner>.*?)</\\s*(?P=tag)\\s*>)?)''',\n- re.MULTILINE | re.DOTALL)\n-attr_re = re.compile(r'''\\s*(?P<key>[\\w-]+)\\s*(?:=\\s*(?P<quote>[\"']?)(?P<value>.*?)(?P=quote)\\s*)?''')\n-Tag = namedtuple(\"Tag\", \"tag attributes text\")\n-\n-\n-def itertags(html, tag):\n- \"\"\"\n- Brute force regex based HTML tag parser. This is a rough-and-ready searcher to find HTML tags when\n- standards compliance is not required. Will find tags that are commented out, or inside script tag etc.\n-\n- :param html: HTML page\n- :param tag: tag name to find\n- :return: generator with Tags\n- \"\"\"\n- for match in tag_re.finditer(html):\n- if match.group(\"tag\") == tag:\n- attrs = {a.group(\"key\").lower(): a.group(\"value\") for a in attr_re.finditer(match.group(\"attr\"))}\n- yield Tag(match.group(\"tag\"), attrs, match.group(\"inner\"))\n", "issue": "Remove `streamlink.plugin.api.utils.itertags`\n[`streamlink.plugin.api.utils.itertags`](https://github.com/streamlink/streamlink/blob/3.2.0/src/streamlink/plugin/api/utils.py#L16-L28) has become obsolete ever since `lxml` was added as a dependency to Streamlink for parsing HTML.\r\n\r\n`itertags` is a hacky implementation via regexes, which is not only slow, but it's also impossible to correctly parse HTML nodes with regular expressions, so it shouldn't be used when better and much faster solutions are available. It also always requires unescaping tag values, which is annoying.\r\n\r\nWe've already updated and replaced lots of plugins which were previously using it, but there are still some left:\r\n```\r\n$ GIT_PAGER=cat git grep -F 'from streamlink.plugin.api.utils import' a1ce471f\r\na1ce471f:src/streamlink/plugins/cdnbg.py:from streamlink.plugin.api.utils import itertags\r\na1ce471f:src/streamlink/plugins/facebook.py:from streamlink.plugin.api.utils import itertags\r\na1ce471f:src/streamlink/plugins/funimationnow.py:from streamlink.plugin.api.utils import itertags\r\na1ce471f:src/streamlink/plugins/senategov.py:from streamlink.plugin.api.utils import itertags\r\na1ce471f:src/streamlink/plugins/vrtbe.py:from streamlink.plugin.api.utils import itertags\r\na1ce471f:tests/test_plugin_utils.py:from streamlink.plugin.api.utils import itertags\r\n```\r\n\r\n- [x] cdnbg\r\n- [x] facebook\r\n- [x] funimationnow\r\n- [x] senategov\r\n- [x] vrtbe\r\n\r\nOnce every last plugin has been updated, the entire `streamlink.plugin.api.utils` module can be removed, as it only contains the `itertags` function and some other useless export aliases which are not even used anymore in Streamlink's codebase.\r\n\r\nIf we care about plugin-API stability (something which has never been discussed), removing this would be considered a breaking change. 
Since we've just dropped py36, that's something which could be included in the 4.0.0 release.\n", "before_files": [{"content": "\"\"\"Useful wrappers and other tools.\"\"\"\nimport re\nfrom collections import namedtuple\n\nfrom streamlink.utils.parse import parse_json, parse_qsd as parse_query, parse_xml\n\n__all__ = [\"parse_json\", \"parse_xml\", \"parse_query\"]\n\n\ntag_re = re.compile(r'''(?=<(?P<tag>[a-zA-Z]+)(?P<attr>.*?)(?P<end>/)?>(?:(?P<inner>.*?)</\\s*(?P=tag)\\s*>)?)''',\n re.MULTILINE | re.DOTALL)\nattr_re = re.compile(r'''\\s*(?P<key>[\\w-]+)\\s*(?:=\\s*(?P<quote>[\"']?)(?P<value>.*?)(?P=quote)\\s*)?''')\nTag = namedtuple(\"Tag\", \"tag attributes text\")\n\n\ndef itertags(html, tag):\n \"\"\"\n Brute force regex based HTML tag parser. This is a rough-and-ready searcher to find HTML tags when\n standards compliance is not required. Will find tags that are commented out, or inside script tag etc.\n\n :param html: HTML page\n :param tag: tag name to find\n :return: generator with Tags\n \"\"\"\n for match in tag_re.finditer(html):\n if match.group(\"tag\") == tag:\n attrs = {a.group(\"key\").lower(): a.group(\"value\") for a in attr_re.finditer(match.group(\"attr\"))}\n yield Tag(match.group(\"tag\"), attrs, match.group(\"inner\"))\n", "path": "src/streamlink/plugin/api/utils.py"}]} | 1,414 | 407 |
gh_patches_debug_3218 | rasdani/github-patches | git_diff | translate__pootle-6745 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Init from templates doesn't create directories correctly
### Steps to reproduce:
- create a new project which has subdirectories
- load templates from fs
- init a new tp from admin ui
### Results (Expected/Actual):
- new dirs/stores are created, but the dirs don't have the correct link to their tp
### Environment (i.e. 'pootle --version', DB, OS, Browser):
2.9
</issue>
<code>
[start of pootle/apps/pootle_translationproject/models.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import logging
10 import posixpath
11 from pathlib import PurePosixPath
12
13 from django.contrib.contenttypes.fields import GenericRelation
14 from django.db import models
15 from django.urls import reverse
16 from django.utils.functional import cached_property
17
18 from pootle.core.delegate import data_tool
19 from pootle.core.mixins import CachedTreeItem
20 from pootle.core.url_helpers import get_editor_filter, split_pootle_path
21 from pootle_app.models.directory import Directory
22 from pootle_checks.constants import EXCLUDED_FILTERS
23 from pootle_language.models import Language
24 from pootle_project.models import Project
25 from pootle_revision.models import Revision
26 from staticpages.models import StaticPage
27
28
29 logger = logging.getLogger(__name__)
30
31
32 class TranslationProjectManager(models.Manager):
33
34 def get_terminology_project(self, language_id):
35 # FIXME: the code below currently uses the same approach to determine
36 # the 'terminology' kind of a project as 'Project.is_terminology()',
37 # which means it checks the value of 'checkstyle' field
38 # (see pootle_project/models.py:240).
39 #
40 # This should probably be replaced in the future with a dedicated
41 # project property.
42 return self.get(language=language_id,
43 project__checkstyle='terminology')
44
45 def live(self):
46 """Filters translation projects that have non-obsolete directories."""
47 return self.filter(directory__obsolete=False)
48
49 def for_user(self, user, select_related=None):
50 """Filters translation projects for a specific user.
51
52 - Admins always get all translation projects.
53 - Regular users only get enabled translation projects
54 accessible to them.
55
56 :param user: The user for whom the translation projects need to be
57 retrieved for.
58 :return: A filtered queryset with `TranslationProject`s for `user`.
59 """
60 qs = self.live()
61 if select_related is not None:
62 qs = qs.select_related(*select_related)
63
64 if user.is_superuser:
65 return qs
66
67 return qs.filter(
68 project__disabled=False,
69 project__code__in=Project.accessible_by_user(user))
70
71 def get_for_user(self, user, project_code, language_code,
72 select_related=None):
73 """Gets a `language_code`/`project_code` translation project
74 for a specific `user`.
75
76 - Admins can get the translation project even
77 if its project is disabled.
78 - Regular users only get a translation project
79 if its project isn't disabled and it is accessible to them.
80
81 :param user: The user for whom the translation project needs
82 to be retrieved.
83 :param project_code: The code of a project for the TP to retrieve.
84 :param language_code: The code of the language fro the TP to retrieve.
85 :return: The `TranslationProject` matching the params, raises
86 otherwise.
87 """
88 return self.for_user(
89 user, select_related).get(
90 project__code=project_code,
91 language__code=language_code)
92
93
94 class TranslationProject(models.Model, CachedTreeItem):
95
96 language = models.ForeignKey(
97 Language, db_index=False, on_delete=models.CASCADE)
98 project = models.ForeignKey(
99 Project, db_index=True, on_delete=models.CASCADE)
100 directory = models.OneToOneField(
101 Directory, db_index=True, editable=False, on_delete=models.CASCADE)
102 pootle_path = models.CharField(max_length=255, null=False, unique=True,
103 db_index=True, editable=False)
104 creation_time = models.DateTimeField(auto_now_add=True, db_index=True,
105 editable=False, null=True)
106 revisions = GenericRelation(Revision)
107
108 objects = TranslationProjectManager()
109
110 class Meta(object):
111 unique_together = (
112 ('language', 'project'),
113 ('project', 'language'))
114 db_table = 'pootle_app_translationproject'
115 # disabled objects are hidden for related objects too
116 base_manager_name = 'objects'
117
118 @cached_property
119 def code(self):
120 return u'-'.join([self.language.code, self.project.code])
121
122 @cached_property
123 def data_tool(self):
124 return data_tool.get(self.__class__)(self)
125
126 # # # # # # # # # # # # # # Properties # # # # # # # # # # # # # # # # # #
127
128 @property
129 def name(self):
130 # TODO: See if `self.fullname` can be removed
131 return self.fullname
132
133 @property
134 def fullname(self):
135 return "%s [%s]" % (self.project.fullname, self.language.name)
136
137 @property
138 def checker(self):
139 from translate.filters import checks
140 checkerclasses = [
141 checks.projectcheckers.get(
142 self.project.checkstyle,
143 checks.StandardChecker)]
144 return checks.TeeChecker(checkerclasses=checkerclasses,
145 excludefilters=EXCLUDED_FILTERS,
146 errorhandler=self.filtererrorhandler,
147 languagecode=self.language.code)
148
149 @property
150 def disabled(self):
151 return self.project.disabled
152
153 @cached_property
154 def templates_tp(self):
155 return self.project.get_template_translationproject()
156
157 @property
158 def is_template_project(self):
159 return self == self.templates_tp
160
161 # # # # # # # # # # # # # # Methods # # # # # # # # # # # # # # # # # # #
162
163 def __unicode__(self):
164 return self.pootle_path
165
166 def __init__(self, *args, **kwargs):
167 super(TranslationProject, self).__init__(*args, **kwargs)
168
169 def save(self, *args, **kwargs):
170 self.directory = (
171 self.language.directory.get_or_make_subdir(self.project.code))
172 self.pootle_path = self.directory.pootle_path
173 super(TranslationProject, self).save(*args, **kwargs)
174 if self.directory.tp_id != self.pk:
175 self.directory.tp = self
176 self.directory.save()
177
178 def delete(self, *args, **kwargs):
179 directory = self.directory
180
181 super(TranslationProject, self).delete(*args, **kwargs)
182 directory.delete()
183
184 def get_absolute_url(self):
185 return reverse(
186 'pootle-tp-browse',
187 args=split_pootle_path(self.pootle_path)[:-1])
188
189 def get_translate_url(self, **kwargs):
190 return u''.join(
191 [reverse("pootle-tp-translate",
192 args=split_pootle_path(self.pootle_path)[:-1]),
193 get_editor_filter(**kwargs)])
194
195 def get_announcement(self, user=None):
196 """Return the related announcement, if any."""
197 return StaticPage.get_announcement_for(self.pootle_path, user)
198
199 def filtererrorhandler(self, functionname, str1, str2, e):
200 logger.error(
201 u"Error in filter %s: %r, %r, %s",
202 functionname,
203 str1,
204 str2, e)
205 return False
206
207 def is_accessible_by(self, user):
208 """Returns `True` if the current translation project is accessible
209 by `user`.
210 """
211 if user.is_superuser:
212 return True
213
214 return self.project.code in Project.accessible_by_user(user)
215
216 def can_be_inited_from_templates(self):
217 """Returns `True` if the current translation project hasn't been
218 saved yet and can be initialized from templates.
219 """
220
221 # This method checks if the current translation project directory
222 # doesn't exist. So it won't work if the translation project is already
223 # saved the database because the translation project directory is
224 # auto-created in `save()` method.
225 return (
226 not self.is_template_project
227 and self.templates_tp is not None)
228
229 def create_parent_dirs(self, pootle_path):
230 parent = self.directory
231 dirs_to_create = []
232 for path in PurePosixPath(pootle_path).parents:
233 path = posixpath.join(str(path), "")
234 if path == self.pootle_path:
235 break
236 dirs_to_create.append(path)
237 for path in reversed(dirs_to_create):
238 parent, __ = Directory.objects.get_or_create(
239 pootle_path=path,
240 parent=parent,
241 name=posixpath.basename(path.rstrip("/")))
242 return parent
243
244 def init_store_from_template(self, template_store):
245 """Initialize a new file for `self` using `template_store`.
246 """
247 pootle_path = posixpath.join(
248 self.pootle_path.rstrip("/"),
249 template_store.tp_path.lstrip("/"))
250 pootle_path = ".".join(
251 [posixpath.splitext(pootle_path)[0],
252 template_store.filetype.extension.name])
253 name = posixpath.basename(pootle_path)
254 if name in ["template.po", "templates.po"]:
255 # gnu-style layout
256 # use language code instead of template name
257 name = ".".join(
258 [self.language.code,
259 template_store.filetype.extension.name])
260 dirname = posixpath.dirname(pootle_path)
261 pootle_path = posixpath.join(dirname, name)
262 if not self.stores.filter(pootle_path=pootle_path).exists():
263 return self.stores.create(
264 parent=self.create_parent_dirs(pootle_path),
265 pootle_path=pootle_path,
266 name=name)
267
268 def init_from_templates(self):
269 """Initializes the current translation project files using
270 the templates TP ones.
271 """
272 template_stores = self.templates_tp.stores.live().select_related(
273 "filetype__template_extension",
274 "filetype__extension").order_by("creation_time")
275 for template_store in template_stores.iterator():
276 new_store = self.init_store_from_template(template_store)
277 if new_store:
278 new_store.update(
279 new_store.deserialize(template_store.serialize()))
280
281 # # # TreeItem
282 def get_children(self):
283 return self.directory.children
284
285 def get_parents(self):
286 return [self.project]
287
[end of pootle/apps/pootle_translationproject/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_translationproject/models.py b/pootle/apps/pootle_translationproject/models.py
--- a/pootle/apps/pootle_translationproject/models.py
+++ b/pootle/apps/pootle_translationproject/models.py
@@ -238,6 +238,7 @@
parent, __ = Directory.objects.get_or_create(
pootle_path=path,
parent=parent,
+ tp=self,
name=posixpath.basename(path.rstrip("/")))
return parent
| {"golden_diff": "diff --git a/pootle/apps/pootle_translationproject/models.py b/pootle/apps/pootle_translationproject/models.py\n--- a/pootle/apps/pootle_translationproject/models.py\n+++ b/pootle/apps/pootle_translationproject/models.py\n@@ -238,6 +238,7 @@\n parent, __ = Directory.objects.get_or_create(\n pootle_path=path,\n parent=parent,\n+ tp=self,\n name=posixpath.basename(path.rstrip(\"/\")))\n return parent\n", "issue": "Init from templates doesnt create directories correctly\n### Steps to reproduce:\r\n\r\n- create a new project which has subdirectories\r\n- load templates from fs\r\n- init a new tp from admin ui\r\n\r\n### Results (Expected/Actual):\r\n\r\n- new dirs/stores are created, but the dirs dont have the correct link to their tp\r\n\r\n### Environment (i.e. 'pootle --version', DB, OS, Browser):\r\n\r\n2.9\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport logging\nimport posixpath\nfrom pathlib import PurePosixPath\n\nfrom django.contrib.contenttypes.fields import GenericRelation\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\n\nfrom pootle.core.delegate import data_tool\nfrom pootle.core.mixins import CachedTreeItem\nfrom pootle.core.url_helpers import get_editor_filter, split_pootle_path\nfrom pootle_app.models.directory import Directory\nfrom pootle_checks.constants import EXCLUDED_FILTERS\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\nfrom pootle_revision.models import Revision\nfrom staticpages.models import StaticPage\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass TranslationProjectManager(models.Manager):\n\n def get_terminology_project(self, language_id):\n # FIXME: the code below currently uses the same approach to determine\n # the 'terminology' kind of a project as 'Project.is_terminology()',\n # which means it checks the value of 'checkstyle' field\n # (see pootle_project/models.py:240).\n #\n # This should probably be replaced in the future with a dedicated\n # project property.\n return self.get(language=language_id,\n project__checkstyle='terminology')\n\n def live(self):\n \"\"\"Filters translation projects that have non-obsolete directories.\"\"\"\n return self.filter(directory__obsolete=False)\n\n def for_user(self, user, select_related=None):\n \"\"\"Filters translation projects for a specific user.\n\n - Admins always get all translation projects.\n - Regular users only get enabled translation projects\n accessible to them.\n\n :param user: The user for whom the translation projects need to be\n retrieved for.\n :return: A filtered queryset with `TranslationProject`s for `user`.\n \"\"\"\n qs = self.live()\n if select_related is not None:\n qs = qs.select_related(*select_related)\n\n if user.is_superuser:\n return qs\n\n return qs.filter(\n project__disabled=False,\n project__code__in=Project.accessible_by_user(user))\n\n def get_for_user(self, user, project_code, language_code,\n select_related=None):\n \"\"\"Gets a `language_code`/`project_code` translation project\n for a specific `user`.\n\n - Admins can get the translation project even\n if its project is disabled.\n - Regular users only get a translation project\n if its project isn't disabled and it is 
accessible to them.\n\n :param user: The user for whom the translation project needs\n to be retrieved.\n :param project_code: The code of a project for the TP to retrieve.\n :param language_code: The code of the language fro the TP to retrieve.\n :return: The `TranslationProject` matching the params, raises\n otherwise.\n \"\"\"\n return self.for_user(\n user, select_related).get(\n project__code=project_code,\n language__code=language_code)\n\n\nclass TranslationProject(models.Model, CachedTreeItem):\n\n language = models.ForeignKey(\n Language, db_index=False, on_delete=models.CASCADE)\n project = models.ForeignKey(\n Project, db_index=True, on_delete=models.CASCADE)\n directory = models.OneToOneField(\n Directory, db_index=True, editable=False, on_delete=models.CASCADE)\n pootle_path = models.CharField(max_length=255, null=False, unique=True,\n db_index=True, editable=False)\n creation_time = models.DateTimeField(auto_now_add=True, db_index=True,\n editable=False, null=True)\n revisions = GenericRelation(Revision)\n\n objects = TranslationProjectManager()\n\n class Meta(object):\n unique_together = (\n ('language', 'project'),\n ('project', 'language'))\n db_table = 'pootle_app_translationproject'\n # disabled objects are hidden for related objects too\n base_manager_name = 'objects'\n\n @cached_property\n def code(self):\n return u'-'.join([self.language.code, self.project.code])\n\n @cached_property\n def data_tool(self):\n return data_tool.get(self.__class__)(self)\n\n # # # # # # # # # # # # # # Properties # # # # # # # # # # # # # # # # # #\n\n @property\n def name(self):\n # TODO: See if `self.fullname` can be removed\n return self.fullname\n\n @property\n def fullname(self):\n return \"%s [%s]\" % (self.project.fullname, self.language.name)\n\n @property\n def checker(self):\n from translate.filters import checks\n checkerclasses = [\n checks.projectcheckers.get(\n self.project.checkstyle,\n checks.StandardChecker)]\n return checks.TeeChecker(checkerclasses=checkerclasses,\n excludefilters=EXCLUDED_FILTERS,\n errorhandler=self.filtererrorhandler,\n languagecode=self.language.code)\n\n @property\n def disabled(self):\n return self.project.disabled\n\n @cached_property\n def templates_tp(self):\n return self.project.get_template_translationproject()\n\n @property\n def is_template_project(self):\n return self == self.templates_tp\n\n # # # # # # # # # # # # # # Methods # # # # # # # # # # # # # # # # # # #\n\n def __unicode__(self):\n return self.pootle_path\n\n def __init__(self, *args, **kwargs):\n super(TranslationProject, self).__init__(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.directory = (\n self.language.directory.get_or_make_subdir(self.project.code))\n self.pootle_path = self.directory.pootle_path\n super(TranslationProject, self).save(*args, **kwargs)\n if self.directory.tp_id != self.pk:\n self.directory.tp = self\n self.directory.save()\n\n def delete(self, *args, **kwargs):\n directory = self.directory\n\n super(TranslationProject, self).delete(*args, **kwargs)\n directory.delete()\n\n def get_absolute_url(self):\n return reverse(\n 'pootle-tp-browse',\n args=split_pootle_path(self.pootle_path)[:-1])\n\n def get_translate_url(self, **kwargs):\n return u''.join(\n [reverse(\"pootle-tp-translate\",\n args=split_pootle_path(self.pootle_path)[:-1]),\n get_editor_filter(**kwargs)])\n\n def get_announcement(self, user=None):\n \"\"\"Return the related announcement, if any.\"\"\"\n return StaticPage.get_announcement_for(self.pootle_path, user)\n\n def 
filtererrorhandler(self, functionname, str1, str2, e):\n logger.error(\n u\"Error in filter %s: %r, %r, %s\",\n functionname,\n str1,\n str2, e)\n return False\n\n def is_accessible_by(self, user):\n \"\"\"Returns `True` if the current translation project is accessible\n by `user`.\n \"\"\"\n if user.is_superuser:\n return True\n\n return self.project.code in Project.accessible_by_user(user)\n\n def can_be_inited_from_templates(self):\n \"\"\"Returns `True` if the current translation project hasn't been\n saved yet and can be initialized from templates.\n \"\"\"\n\n # This method checks if the current translation project directory\n # doesn't exist. So it won't work if the translation project is already\n # saved the database because the translation project directory is\n # auto-created in `save()` method.\n return (\n not self.is_template_project\n and self.templates_tp is not None)\n\n def create_parent_dirs(self, pootle_path):\n parent = self.directory\n dirs_to_create = []\n for path in PurePosixPath(pootle_path).parents:\n path = posixpath.join(str(path), \"\")\n if path == self.pootle_path:\n break\n dirs_to_create.append(path)\n for path in reversed(dirs_to_create):\n parent, __ = Directory.objects.get_or_create(\n pootle_path=path,\n parent=parent,\n name=posixpath.basename(path.rstrip(\"/\")))\n return parent\n\n def init_store_from_template(self, template_store):\n \"\"\"Initialize a new file for `self` using `template_store`.\n \"\"\"\n pootle_path = posixpath.join(\n self.pootle_path.rstrip(\"/\"),\n template_store.tp_path.lstrip(\"/\"))\n pootle_path = \".\".join(\n [posixpath.splitext(pootle_path)[0],\n template_store.filetype.extension.name])\n name = posixpath.basename(pootle_path)\n if name in [\"template.po\", \"templates.po\"]:\n # gnu-style layout\n # use language code instead of template name\n name = \".\".join(\n [self.language.code,\n template_store.filetype.extension.name])\n dirname = posixpath.dirname(pootle_path)\n pootle_path = posixpath.join(dirname, name)\n if not self.stores.filter(pootle_path=pootle_path).exists():\n return self.stores.create(\n parent=self.create_parent_dirs(pootle_path),\n pootle_path=pootle_path,\n name=name)\n\n def init_from_templates(self):\n \"\"\"Initializes the current translation project files using\n the templates TP ones.\n \"\"\"\n template_stores = self.templates_tp.stores.live().select_related(\n \"filetype__template_extension\",\n \"filetype__extension\").order_by(\"creation_time\")\n for template_store in template_stores.iterator():\n new_store = self.init_store_from_template(template_store)\n if new_store:\n new_store.update(\n new_store.deserialize(template_store.serialize()))\n\n # # # TreeItem\n def get_children(self):\n return self.directory.children\n\n def get_parents(self):\n return [self.project]\n", "path": "pootle/apps/pootle_translationproject/models.py"}]} | 3,609 | 112 |
gh_patches_debug_60797 | rasdani/github-patches | git_diff | engnadeau__pybotics-751 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create a way to add your own arm model [FEATURE]
## User Story
<!-- A clear and concise description of what the problem is. -->
I want to add my own arm configuration to the list of pre-trained models.
## Potential Solutions
<!-- A clear and concise description of what you want to happen. -->
If there was a comment next to each line of one of the arrays containing the pre-trained model saying what exactly each value was supposed to represent, that would help.
<!-- A clear and concise description of any alternative solutions or features you've considered. -->
I tried looking at the spec sheets of the arms and matching up values but I couldn't figure much out.
</issue>
<code>
[start of pybotics/predefined_models.py]
1 """Predefined robot models."""
2 import numpy as np # type: ignore
3
4
5 def kuka_lbr_iiwa_7() -> np.ndarray: # pragma: no cover
6 """Get KUKA LBR iiwa 7 MDH model."""
7 return np.array(
8 [
9 [0, 0, 0, 340],
10 [-np.pi / 2, 0, 0, 0],
11 [np.pi / 2, 0, 0, 400],
12 [np.pi / 2, 0, 0, 0],
13 [-np.pi / 2, 0, 0, 400],
14 [-np.pi / 2, 0, 0, 0],
15 [np.pi / 2, 0, 0, 126],
16 ]
17 )
18
19
20 def mecademic_meca500() -> np.ndarray: # pragma: no cover
21 """Get Meca500 MDH model."""
22 return np.array(
23 [
24 [0, 0, 0, 135],
25 [-np.pi / 2, 0, -np.pi / 2, 0],
26 [0, 135, 0, 0],
27 [-np.pi / 2, 38, 0, 120],
28 [np.pi / 2, 0, 0, 0],
29 [-np.pi / 2, 0, np.pi, 72],
30 ]
31 )
32
33
34 def puma560() -> np.ndarray: # pragma: no cover
35 """Get PUMA560 MDH model."""
36 return np.array(
37 [
38 [0, 0, 0, 0],
39 [-np.pi / 2, 0, 0, 0],
40 [0, 612.7, 0, 0],
41 [0, 571.6, 0, 163.9],
42 [-np.pi / 2, 0, 0, 115.7],
43 [np.pi / 2, 0, np.pi, 92.2],
44 ]
45 )
46
47
48 def ur10() -> np.ndarray: # pragma: no cover
49 """Get UR10 MDH model."""
50 return np.array(
51 [
52 [0, 0, 0, 118],
53 [np.pi / 2, 0, np.pi, 0],
54 [0, 612.7, 0, 0],
55 [0, 571.6, 0, 163.9],
56 [-np.pi / 2, 0, 0, 115.7],
57 [np.pi / 2, 0, np.pi, 92.2],
58 ]
59 )
60
61
62 def abb_irb120() -> np.ndarray: # pragma: no cover
63 """Get ABB irb120 MDH model."""
64 return np.array(
65 [
66 [0, 0, 0, 290],
67 [-np.pi / 2, 0, -np.pi / 2, 0],
68 [0, 270, 0, 0],
69 [-np.pi / 2, 70, 0, 302],
70 [np.pi / 2, 0, 0, 0],
71 [-np.pi / 2, 0, np.pi, 72],
72 ]
73 )
74
[end of pybotics/predefined_models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pybotics/predefined_models.py b/pybotics/predefined_models.py
--- a/pybotics/predefined_models.py
+++ b/pybotics/predefined_models.py
@@ -1,4 +1,8 @@
-"""Predefined robot models."""
+"""Predefined robot models.
+
+These models correspond to the Modified Denavit–Hartenberg parameters:
+https://en.wikipedia.org/wiki/Denavit%E2%80%93Hartenberg_parameters
+"""
import numpy as np # type: ignore
| {"golden_diff": "diff --git a/pybotics/predefined_models.py b/pybotics/predefined_models.py\n--- a/pybotics/predefined_models.py\n+++ b/pybotics/predefined_models.py\n@@ -1,4 +1,8 @@\n-\"\"\"Predefined robot models.\"\"\"\n+\"\"\"Predefined robot models.\n+\n+These models correspond to the Modified Denavit\u2013Hartenberg parameters:\n+https://en.wikipedia.org/wiki/Denavit%E2%80%93Hartenberg_parameters\n+\"\"\"\n import numpy as np # type: ignore\n", "issue": "Create a way to add your own arm model[FEATURE]\n## User Story\r\n\r\n<!-- A clear and concise description of what the problem is. \r\nI want to add my own arm configuration to the list of pre-trained models.\r\n\r\n## Potential Solutions\r\n\r\n<!-- A clear and concise description of what you want to happen. -->\r\nIf there was a comment next to each line of one of the arrays containing the pre-trained model saying what exactly each value was supposed to represent, that would help.\r\n<!-- A clear and concise description of any alternative solutions or features you've considered. -->\r\nI tried looking at the spec sheets of the arms and matching up values but I couldn't figure much out. \r\n\r\n\n", "before_files": [{"content": "\"\"\"Predefined robot models.\"\"\"\nimport numpy as np # type: ignore\n\n\ndef kuka_lbr_iiwa_7() -> np.ndarray: # pragma: no cover\n \"\"\"Get KUKA LBR iiwa 7 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 340],\n [-np.pi / 2, 0, 0, 0],\n [np.pi / 2, 0, 0, 400],\n [np.pi / 2, 0, 0, 0],\n [-np.pi / 2, 0, 0, 400],\n [-np.pi / 2, 0, 0, 0],\n [np.pi / 2, 0, 0, 126],\n ]\n )\n\n\ndef mecademic_meca500() -> np.ndarray: # pragma: no cover\n \"\"\"Get Meca500 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 135],\n [-np.pi / 2, 0, -np.pi / 2, 0],\n [0, 135, 0, 0],\n [-np.pi / 2, 38, 0, 120],\n [np.pi / 2, 0, 0, 0],\n [-np.pi / 2, 0, np.pi, 72],\n ]\n )\n\n\ndef puma560() -> np.ndarray: # pragma: no cover\n \"\"\"Get PUMA560 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 0],\n [-np.pi / 2, 0, 0, 0],\n [0, 612.7, 0, 0],\n [0, 571.6, 0, 163.9],\n [-np.pi / 2, 0, 0, 115.7],\n [np.pi / 2, 0, np.pi, 92.2],\n ]\n )\n\n\ndef ur10() -> np.ndarray: # pragma: no cover\n \"\"\"Get UR10 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 118],\n [np.pi / 2, 0, np.pi, 0],\n [0, 612.7, 0, 0],\n [0, 571.6, 0, 163.9],\n [-np.pi / 2, 0, 0, 115.7],\n [np.pi / 2, 0, np.pi, 92.2],\n ]\n )\n\n\ndef abb_irb120() -> np.ndarray: # pragma: no cover\n \"\"\"Get ABB irb120 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 290],\n [-np.pi / 2, 0, -np.pi / 2, 0],\n [0, 270, 0, 0],\n [-np.pi / 2, 70, 0, 302],\n [np.pi / 2, 0, 0, 0],\n [-np.pi / 2, 0, np.pi, 72],\n ]\n )\n", "path": "pybotics/predefined_models.py"}]} | 1,613 | 115 |
gh_patches_debug_19312 | rasdani/github-patches | git_diff | autogluon__autogluon-417 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
About ENAS_Scheduler
While using `ENAS_Scheduler`, maybe:
```
tbar.set_description('avg reward: {}'.format(self.baseline))
```
is better than
```
tbar.set_description('avg reward: {:.2f}'.format(self.baseline))
```
in `run()` in `enas_scheduler.py`, because when `self.baseline` is `None`, it will report an error.
</issue>
<code>
[start of autogluon/contrib/enas/enas_scheduler.py]
1 import os
2 import pickle
3 import logging
4 from collections import OrderedDict
5 from multiprocessing.pool import ThreadPool
6
7 import mxnet as mx
8
9 from ...searcher import RLSearcher
10 from ...scheduler.resource import get_gpu_count, get_cpu_count
11 from ...task.image_classification.dataset import get_built_in_dataset
12 from ...task.image_classification.utils import *
13 from ...utils import (mkdir, save, load, update_params, collect_params, DataLoader, tqdm, in_ipynb)
14 from .enas_utils import *
15
16 __all__ = ['ENAS_Scheduler']
17
18 logger = logging.getLogger(__name__)
19
20 IMAGENET_TRAINING_SAMPLES = 1281167
21
22 class ENAS_Scheduler(object):
23 """ENAS Scheduler, which automatically creates LSTM controller based on the search spaces.
24 """
25 def __init__(self, supernet, train_set='imagenet', val_set=None,
26 train_fn=default_train_fn, eval_fn=default_val_fn,
27 train_args={}, val_args={}, reward_fn= default_reward_fn,
28 num_gpus=0, num_cpus=4,
29 batch_size=256, epochs=120, warmup_epochs=5,
30 controller_lr=1e-3, controller_type='lstm',
31 controller_batch_size=10, ema_baseline_decay=0.95,
32 update_arch_frequency=20, checkname='./enas/checkpoint.ag',
33 plot_frequency=0, **kwargs):
34 num_cpus = get_cpu_count() if num_cpus > get_cpu_count() else num_cpus
35 num_gpus = get_gpu_count() if num_gpus > get_gpu_count() else num_gpus
36 self.supernet = supernet
37 self.train_fn = train_fn
38 self.eval_fn = eval_fn
39 self.reward_fn = reward_fn
40 self.checkname = checkname
41 self.plot_frequency = plot_frequency
42 self.epochs = epochs
43 self.warmup_epochs = warmup_epochs
44 self.controller_batch_size = controller_batch_size
45 kwspaces = self.supernet.kwspaces
46
47 self.initialize_miscs(train_set, val_set, batch_size, num_cpus, num_gpus,
48 train_args, val_args)
49
50 # create RL searcher/controller
51 self.baseline = None
52 self.ema_decay = ema_baseline_decay
53 self.searcher = RLSearcher(kwspaces, controller_type=controller_type,
54 prefetch=4, num_workers=4)
55 # controller setup
56 self.controller = self.searcher.controller
57 self.controller_optimizer = mx.gluon.Trainer(
58 self.controller.collect_params(), 'adam',
59 optimizer_params={'learning_rate': controller_lr})
60 self.update_arch_frequency = update_arch_frequency
61 self.val_acc = 0
62 # async controller sample
63 self._worker_pool = ThreadPool(2)
64 self._data_buffer = {}
65 self._rcvd_idx = 0
66 self._sent_idx = 0
67 self._timeout = 20
68 # logging history
69 self.training_history = []
70 self._prefetch_controller()
71
72 def initialize_miscs(self, train_set, val_set, batch_size, num_cpus, num_gpus,
73 train_args, val_args):
74 """Initialize framework related miscs, such as train/val data and train/val
75 function arguments.
76 """
77 ctx = [mx.gpu(i) for i in range(num_gpus)] if num_gpus > 0 else [mx.cpu(0)]
78 self.supernet.collect_params().reset_ctx(ctx)
79 self.supernet.hybridize()
80 dataset_name = train_set
81
82 if isinstance(train_set, str):
83 train_set = get_built_in_dataset(dataset_name, train=True, batch_size=batch_size,
84 num_workers=num_cpus, shuffle=True)
85 val_set = get_built_in_dataset(dataset_name, train=False, batch_size=batch_size,
86 num_workers=num_cpus, shuffle=True)
87 if isinstance(train_set, gluon.data.Dataset):
88 self.train_data = DataLoader(
89 train_set, batch_size=batch_size, shuffle=True,
90 last_batch="discard", num_workers=num_cpus)
91 # very important, make shuffle for training contoller
92 self.val_data = DataLoader(
93 val_set, batch_size=batch_size, shuffle=True,
94 num_workers=num_cpus, prefetch=0, sample_times=self.controller_batch_size)
95 else:
96 self.train_data = train_set
97 self.val_data = val_set
98 iters_per_epoch = len(self.train_data) if hasattr(self.train_data, '__len__') else \
99 IMAGENET_TRAINING_SAMPLES // batch_size
100 self.train_args = init_default_train_args(batch_size, self.supernet, self.epochs, iters_per_epoch) \
101 if len(train_args) == 0 else train_args
102 self.val_args = val_args
103 self.val_args['ctx'] = ctx
104 self.val_args['batch_fn'] = imagenet_batch_fn if dataset_name == 'imagenet' else default_batch_fn
105 self.train_args['ctx'] = ctx
106 self.train_args['batch_fn'] = imagenet_batch_fn if dataset_name == 'imagenet' else default_batch_fn
107 self.ctx = ctx
108
109 def run(self):
110 tq = tqdm(range(self.epochs))
111 for epoch in tq:
112 # for recordio data
113 if hasattr(self.train_data, 'reset'): self.train_data.reset()
114 tbar = tqdm(self.train_data)
115 idx = 0
116 for batch in tbar:
117 # sample network configuration
118 config = self.controller.pre_sample()[0]
119 self.supernet.sample(**config)
120 # self.train_fn(self.supernet, batch, **self.train_args)
121 self.train_fn(epoch, self.epochs, self.supernet, batch, **self.train_args)
122 mx.nd.waitall()
123 if epoch >= self.warmup_epochs and (idx % self.update_arch_frequency) == 0:
124 self.train_controller()
125 if self.plot_frequency > 0 and idx % self.plot_frequency == 0 and in_ipynb():
126 graph = self.supernet.graph
127 graph.attr(rankdir='LR', size='8,3')
128 tbar.set_svg(graph._repr_svg_())
129 tbar.set_description('avg reward: {:.2f}'.format(self.baseline))
130 idx += 1
131 self.validation()
132 self.save()
133 tq.set_description('epoch {}, val_acc: {:.2f}, avg reward: {:.2f}' \
134 .format(epoch, self.val_acc, self.baseline))
135
136 def validation(self):
137 if hasattr(self.val_data, 'reset'): self.val_data.reset()
138 # data iter, avoid memory leak
139 it = iter(self.val_data)
140 if hasattr(it, 'reset_sample_times'): it.reset_sample_times()
141 tbar = tqdm(it)
142 # update network arc
143 config = self.controller.inference()
144 self.supernet.sample(**config)
145 metric = mx.metric.Accuracy()
146 for batch in tbar:
147 self.eval_fn(self.supernet, batch, metric=metric, **self.val_args)
148 reward = metric.get()[1]
149 tbar.set_description('Val Acc: {}'.format(reward))
150
151 self.val_acc = reward
152 self.training_history.append(reward)
153
154 def _sample_controller(self):
155 assert self._rcvd_idx < self._sent_idx, "rcvd_idx must be smaller than sent_idx"
156 try:
157 ret = self._data_buffer.pop(self._rcvd_idx)
158 self._rcvd_idx += 1
159 return ret.get(timeout=self._timeout)
160 except Exception:
161 self._worker_pool.terminate()
162 raise
163
164 def _prefetch_controller(self):
165 async_ret = self._worker_pool.apply_async(self._async_sample, ())
166 self._data_buffer[self._sent_idx] = async_ret
167 self._sent_idx += 1
168
169 def _async_sample(self):
170 with mx.autograd.record():
171 # sample controller_batch_size number of configurations
172 configs, log_probs, entropies = self.controller.sample(batch_size=self.controller_batch_size,
173 with_details=True)
174 return configs, log_probs, entropies
175
176 def train_controller(self):
177 """Run multiple number of trials
178 """
179 decay = self.ema_decay
180 if hasattr(self.val_data, 'reset'): self.val_data.reset()
181 # update
182 metric = mx.metric.Accuracy()
183 with mx.autograd.record():
184 # sample controller_batch_size number of configurations
185 configs, log_probs, entropies = self._sample_controller()
186 for i, batch in enumerate(self.val_data):
187 if i >= self.controller_batch_size: break
188 self.supernet.sample(**configs[i])
189 # schedule the training tasks and gather the reward
190 metric.reset()
191 self.eval_fn(self.supernet, batch, metric=metric, **self.val_args)
192 reward = metric.get()[1]
193 reward = self.reward_fn(reward, self.supernet)
194 self.baseline = reward if not self.baseline else self.baseline
195 # substract baseline
196 avg_rewards = mx.nd.array([reward - self.baseline],
197 ctx=self.controller.context)
198 # EMA baseline
199 self.baseline = decay * self.baseline + (1 - decay) * reward
200 # negative policy gradient
201 log_prob = log_probs[i]
202 log_prob = log_prob.sum()
203 loss = - log_prob * avg_rewards
204 loss = loss.sum()
205
206 # update
207 loss.backward()
208 self.controller_optimizer.step(self.controller_batch_size)
209 self._prefetch_controller()
210
211 def load(self, checkname=None):
212 checkname = checkname if checkname else self.checkname
213 state_dict = load(checkname)
214 self.load_state_dict(state_dict)
215
216 def save(self, checkname=None):
217 checkname = checkname if checkname else self.checkname
218 mkdir(os.path.dirname(checkname))
219 save(self.state_dict(), checkname)
220
221 def state_dict(self, destination=None):
222 if destination is None:
223 destination = OrderedDict()
224 destination._metadata = OrderedDict()
225 destination['supernet_params'] = collect_params(self.supernet)
226 destination['controller_params'] = collect_params(self.controller)
227 destination['training_history'] = self.training_history
228 return destination
229
230 def load_state_dict(self, state_dict):
231 update_params(self.supernet, state_dict['supernet_params'], ctx=self.ctx)
232 update_params(self.controller, state_dict['controller_params'], ctx=self.controller.context)
233 self.training_history = state_dict['training_history']
234
[end of autogluon/contrib/enas/enas_scheduler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/autogluon/contrib/enas/enas_scheduler.py b/autogluon/contrib/enas/enas_scheduler.py
--- a/autogluon/contrib/enas/enas_scheduler.py
+++ b/autogluon/contrib/enas/enas_scheduler.py
@@ -126,12 +126,15 @@
graph = self.supernet.graph
graph.attr(rankdir='LR', size='8,3')
tbar.set_svg(graph._repr_svg_())
- tbar.set_description('avg reward: {:.2f}'.format(self.baseline))
+ if self.baseline:
+ tbar.set_description('avg reward: {:.2f}'.format(self.baseline))
idx += 1
self.validation()
self.save()
- tq.set_description('epoch {}, val_acc: {:.2f}, avg reward: {:.2f}' \
- .format(epoch, self.val_acc, self.baseline))
+ msg = 'epoch {}, val_acc: {:.2f}'.format(epoch, self.val_acc)
+ if self.baseline:
+ msg += ', avg reward: {:.2f}'.format(self.baseline)
+ tq.set_description(msg)
def validation(self):
if hasattr(self.val_data, 'reset'): self.val_data.reset()
| {"golden_diff": "diff --git a/autogluon/contrib/enas/enas_scheduler.py b/autogluon/contrib/enas/enas_scheduler.py\n--- a/autogluon/contrib/enas/enas_scheduler.py\n+++ b/autogluon/contrib/enas/enas_scheduler.py\n@@ -126,12 +126,15 @@\n graph = self.supernet.graph\n graph.attr(rankdir='LR', size='8,3')\n tbar.set_svg(graph._repr_svg_())\n- tbar.set_description('avg reward: {:.2f}'.format(self.baseline))\n+ if self.baseline:\n+ tbar.set_description('avg reward: {:.2f}'.format(self.baseline))\n idx += 1\n self.validation()\n self.save()\n- tq.set_description('epoch {}, val_acc: {:.2f}, avg reward: {:.2f}' \\\n- .format(epoch, self.val_acc, self.baseline))\n+ msg = 'epoch {}, val_acc: {:.2f}'.format(epoch, self.val_acc)\n+ if self.baseline:\n+ msg += ', avg reward: {:.2f}'.format(self.baseline)\n+ tq.set_description(msg)\n \n def validation(self):\n if hasattr(self.val_data, 'reset'): self.val_data.reset()\n", "issue": "About ENAS_Scheduler\nWhile using ENAS_Scheduler\r\nMaybe\uff1a\r\n```\r\ntbar.set_description('avg reward: {}'.format(self.baseline))\r\n```\r\nis better than \r\n\r\n```\r\ntbar.set_description('avg reward: {:.2f}'.format(self.baseline))\r\n```\r\nin run in enas_scheduler.py, because when self.baseline=None, it will report error\n", "before_files": [{"content": "import os\nimport pickle\nimport logging\nfrom collections import OrderedDict\nfrom multiprocessing.pool import ThreadPool\n\nimport mxnet as mx\n\nfrom ...searcher import RLSearcher\nfrom ...scheduler.resource import get_gpu_count, get_cpu_count\nfrom ...task.image_classification.dataset import get_built_in_dataset\nfrom ...task.image_classification.utils import *\nfrom ...utils import (mkdir, save, load, update_params, collect_params, DataLoader, tqdm, in_ipynb)\nfrom .enas_utils import *\n\n__all__ = ['ENAS_Scheduler']\n\nlogger = logging.getLogger(__name__)\n\nIMAGENET_TRAINING_SAMPLES = 1281167\n\nclass ENAS_Scheduler(object):\n \"\"\"ENAS Scheduler, which automatically creates LSTM controller based on the search spaces.\n \"\"\"\n def __init__(self, supernet, train_set='imagenet', val_set=None,\n train_fn=default_train_fn, eval_fn=default_val_fn,\n train_args={}, val_args={}, reward_fn= default_reward_fn,\n num_gpus=0, num_cpus=4,\n batch_size=256, epochs=120, warmup_epochs=5,\n controller_lr=1e-3, controller_type='lstm',\n controller_batch_size=10, ema_baseline_decay=0.95,\n update_arch_frequency=20, checkname='./enas/checkpoint.ag',\n plot_frequency=0, **kwargs):\n num_cpus = get_cpu_count() if num_cpus > get_cpu_count() else num_cpus\n num_gpus = get_gpu_count() if num_gpus > get_gpu_count() else num_gpus\n self.supernet = supernet\n self.train_fn = train_fn\n self.eval_fn = eval_fn\n self.reward_fn = reward_fn\n self.checkname = checkname\n self.plot_frequency = plot_frequency\n self.epochs = epochs\n self.warmup_epochs = warmup_epochs\n self.controller_batch_size = controller_batch_size\n kwspaces = self.supernet.kwspaces\n\n self.initialize_miscs(train_set, val_set, batch_size, num_cpus, num_gpus,\n train_args, val_args)\n\n # create RL searcher/controller\n self.baseline = None\n self.ema_decay = ema_baseline_decay\n self.searcher = RLSearcher(kwspaces, controller_type=controller_type,\n prefetch=4, num_workers=4)\n # controller setup\n self.controller = self.searcher.controller\n self.controller_optimizer = mx.gluon.Trainer(\n self.controller.collect_params(), 'adam',\n optimizer_params={'learning_rate': controller_lr})\n self.update_arch_frequency = update_arch_frequency\n self.val_acc = 0\n # async 
controller sample\n self._worker_pool = ThreadPool(2)\n self._data_buffer = {}\n self._rcvd_idx = 0\n self._sent_idx = 0\n self._timeout = 20\n # logging history\n self.training_history = []\n self._prefetch_controller()\n\n def initialize_miscs(self, train_set, val_set, batch_size, num_cpus, num_gpus,\n train_args, val_args):\n \"\"\"Initialize framework related miscs, such as train/val data and train/val\n function arguments.\n \"\"\"\n ctx = [mx.gpu(i) for i in range(num_gpus)] if num_gpus > 0 else [mx.cpu(0)]\n self.supernet.collect_params().reset_ctx(ctx)\n self.supernet.hybridize()\n dataset_name = train_set\n\n if isinstance(train_set, str):\n train_set = get_built_in_dataset(dataset_name, train=True, batch_size=batch_size,\n num_workers=num_cpus, shuffle=True)\n val_set = get_built_in_dataset(dataset_name, train=False, batch_size=batch_size,\n num_workers=num_cpus, shuffle=True)\n if isinstance(train_set, gluon.data.Dataset):\n self.train_data = DataLoader(\n train_set, batch_size=batch_size, shuffle=True,\n last_batch=\"discard\", num_workers=num_cpus)\n # very important, make shuffle for training contoller\n self.val_data = DataLoader(\n val_set, batch_size=batch_size, shuffle=True,\n num_workers=num_cpus, prefetch=0, sample_times=self.controller_batch_size)\n else:\n self.train_data = train_set\n self.val_data = val_set\n iters_per_epoch = len(self.train_data) if hasattr(self.train_data, '__len__') else \\\n IMAGENET_TRAINING_SAMPLES // batch_size\n self.train_args = init_default_train_args(batch_size, self.supernet, self.epochs, iters_per_epoch) \\\n if len(train_args) == 0 else train_args\n self.val_args = val_args\n self.val_args['ctx'] = ctx\n self.val_args['batch_fn'] = imagenet_batch_fn if dataset_name == 'imagenet' else default_batch_fn\n self.train_args['ctx'] = ctx\n self.train_args['batch_fn'] = imagenet_batch_fn if dataset_name == 'imagenet' else default_batch_fn\n self.ctx = ctx\n\n def run(self):\n tq = tqdm(range(self.epochs))\n for epoch in tq:\n # for recordio data\n if hasattr(self.train_data, 'reset'): self.train_data.reset()\n tbar = tqdm(self.train_data)\n idx = 0\n for batch in tbar:\n # sample network configuration\n config = self.controller.pre_sample()[0]\n self.supernet.sample(**config)\n # self.train_fn(self.supernet, batch, **self.train_args)\n self.train_fn(epoch, self.epochs, self.supernet, batch, **self.train_args)\n mx.nd.waitall()\n if epoch >= self.warmup_epochs and (idx % self.update_arch_frequency) == 0:\n self.train_controller()\n if self.plot_frequency > 0 and idx % self.plot_frequency == 0 and in_ipynb():\n graph = self.supernet.graph\n graph.attr(rankdir='LR', size='8,3')\n tbar.set_svg(graph._repr_svg_())\n tbar.set_description('avg reward: {:.2f}'.format(self.baseline))\n idx += 1\n self.validation()\n self.save()\n tq.set_description('epoch {}, val_acc: {:.2f}, avg reward: {:.2f}' \\\n .format(epoch, self.val_acc, self.baseline))\n\n def validation(self):\n if hasattr(self.val_data, 'reset'): self.val_data.reset()\n # data iter, avoid memory leak\n it = iter(self.val_data)\n if hasattr(it, 'reset_sample_times'): it.reset_sample_times()\n tbar = tqdm(it)\n # update network arc\n config = self.controller.inference()\n self.supernet.sample(**config)\n metric = mx.metric.Accuracy()\n for batch in tbar:\n self.eval_fn(self.supernet, batch, metric=metric, **self.val_args)\n reward = metric.get()[1]\n tbar.set_description('Val Acc: {}'.format(reward))\n\n self.val_acc = reward\n self.training_history.append(reward)\n\n def 
_sample_controller(self):\n assert self._rcvd_idx < self._sent_idx, \"rcvd_idx must be smaller than sent_idx\"\n try:\n ret = self._data_buffer.pop(self._rcvd_idx)\n self._rcvd_idx += 1\n return ret.get(timeout=self._timeout)\n except Exception:\n self._worker_pool.terminate()\n raise\n\n def _prefetch_controller(self):\n async_ret = self._worker_pool.apply_async(self._async_sample, ())\n self._data_buffer[self._sent_idx] = async_ret\n self._sent_idx += 1\n\n def _async_sample(self):\n with mx.autograd.record():\n # sample controller_batch_size number of configurations\n configs, log_probs, entropies = self.controller.sample(batch_size=self.controller_batch_size,\n with_details=True)\n return configs, log_probs, entropies\n\n def train_controller(self):\n \"\"\"Run multiple number of trials\n \"\"\"\n decay = self.ema_decay\n if hasattr(self.val_data, 'reset'): self.val_data.reset()\n # update \n metric = mx.metric.Accuracy()\n with mx.autograd.record():\n # sample controller_batch_size number of configurations\n configs, log_probs, entropies = self._sample_controller()\n for i, batch in enumerate(self.val_data):\n if i >= self.controller_batch_size: break\n self.supernet.sample(**configs[i])\n # schedule the training tasks and gather the reward\n metric.reset()\n self.eval_fn(self.supernet, batch, metric=metric, **self.val_args)\n reward = metric.get()[1]\n reward = self.reward_fn(reward, self.supernet)\n self.baseline = reward if not self.baseline else self.baseline\n # substract baseline\n avg_rewards = mx.nd.array([reward - self.baseline],\n ctx=self.controller.context)\n # EMA baseline\n self.baseline = decay * self.baseline + (1 - decay) * reward\n # negative policy gradient\n log_prob = log_probs[i]\n log_prob = log_prob.sum()\n loss = - log_prob * avg_rewards\n loss = loss.sum()\n\n # update\n loss.backward()\n self.controller_optimizer.step(self.controller_batch_size)\n self._prefetch_controller()\n\n def load(self, checkname=None):\n checkname = checkname if checkname else self.checkname\n state_dict = load(checkname)\n self.load_state_dict(state_dict)\n\n def save(self, checkname=None):\n checkname = checkname if checkname else self.checkname\n mkdir(os.path.dirname(checkname))\n save(self.state_dict(), checkname)\n\n def state_dict(self, destination=None):\n if destination is None:\n destination = OrderedDict()\n destination._metadata = OrderedDict()\n destination['supernet_params'] = collect_params(self.supernet)\n destination['controller_params'] = collect_params(self.controller)\n destination['training_history'] = self.training_history\n return destination\n\n def load_state_dict(self, state_dict):\n update_params(self.supernet, state_dict['supernet_params'], ctx=self.ctx)\n update_params(self.controller, state_dict['controller_params'], ctx=self.controller.context)\n self.training_history = state_dict['training_history']\n", "path": "autogluon/contrib/enas/enas_scheduler.py"}]} | 3,454 | 282 |
gh_patches_debug_1536 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-2525 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Demo template management command unexpected args.
## Description
<!-- A clear and concise description of what the bug is. -->
After starting the dev environment, the management command to set up the demo DB is broken. Trying to run:
```sh
# docker exec -it mathesar_service_dev python manage.py setup_demo_template_db
```
results in:
```
Traceback (most recent call last):
File "/code/manage.py", line 22, in <module>
main()
File "/code/manage.py", line 18, in main
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/local/lib/python3.9/site-packages/django/core/management/base.py", line 330, in run_from_argv
self.execute(*args, **cmd_options)
File "/usr/local/lib/python3.9/site-packages/django/core/management/base.py", line 371, in execute
output = self.handle(*args, **options)
File "/code/demo/management/commands/setup_demo_template_db.py", line 15, in handle
_setup_demo_template_db(*args, **options)
TypeError: _setup_demo_template_db() got an unexpected keyword argument 'verbosity'
```
## Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
You should be able to run the command listed above successfully in the `dev` environment.
## To Reproduce
<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->
Start the dev environment with a fresh docker state. Run the above command.
## Additional context
<!-- Add any other context about the problem or screenshots here. -->
The problem is in line 15 of `demo/management/commands/setup_demo_template_db.py`.
</issue>
<code>
[start of demo/management/commands/setup_demo_template_db.py]
1 from sqlalchemy import text
2
3 from django.conf import settings
4 from django.core.management import BaseCommand
5
6 from db.install import install_mathesar
7 from demo.install.datasets import load_datasets
8 from mathesar.database.base import create_mathesar_engine
9
10
11 class Command(BaseCommand):
12 help = 'Initialize the demo template database.'
13
14 def handle(self, *args, **options):
15 _setup_demo_template_db(*args, **options)
16
17
18 def _setup_demo_template_db():
19 print("Initializing demo template database...")
20
21 template_db_name = settings.MATHESAR_DEMO_TEMPLATE
22 root_engine = create_mathesar_engine(settings.DATABASES["default"]["NAME"])
23 with root_engine.connect() as conn:
24 conn.execution_options(isolation_level="AUTOCOMMIT")
25 conn.execute(text(f"DROP DATABASE IF EXISTS {template_db_name} WITH (FORCE)"))
26 root_engine.dispose()
27 install_mathesar(
28 database_name=template_db_name,
29 username=settings.DATABASES["default"]["USER"],
30 password=settings.DATABASES["default"]["PASSWORD"],
31 hostname=settings.DATABASES["default"]["HOST"],
32 port=settings.DATABASES["default"]["PORT"],
33 skip_confirm=True
34 )
35 user_engine = create_mathesar_engine(template_db_name)
36 load_datasets(user_engine)
37 user_engine.dispose()
38
[end of demo/management/commands/setup_demo_template_db.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/demo/management/commands/setup_demo_template_db.py b/demo/management/commands/setup_demo_template_db.py
--- a/demo/management/commands/setup_demo_template_db.py
+++ b/demo/management/commands/setup_demo_template_db.py
@@ -12,7 +12,7 @@
help = 'Initialize the demo template database.'
def handle(self, *args, **options):
- _setup_demo_template_db(*args, **options)
+ _setup_demo_template_db()
def _setup_demo_template_db():
| {"golden_diff": "diff --git a/demo/management/commands/setup_demo_template_db.py b/demo/management/commands/setup_demo_template_db.py\n--- a/demo/management/commands/setup_demo_template_db.py\n+++ b/demo/management/commands/setup_demo_template_db.py\n@@ -12,7 +12,7 @@\n help = 'Initialize the demo template database.'\n \n def handle(self, *args, **options):\n- _setup_demo_template_db(*args, **options)\n+ _setup_demo_template_db()\n \n \n def _setup_demo_template_db():\n", "issue": "Demo template management command unexpected args.\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nAfter starting dev environment, the management command to setup the demo DB is broken. Trying to run:\r\n```sh\r\n# docker exec -it mathesar_service_dev python manage.py setup_demo_template_db\r\n```\r\nresults in:\r\n```\r\nTraceback (most recent call last):\r\n File \"/code/manage.py\", line 22, in <module>\r\n main()\r\n File \"/code/manage.py\", line 18, in main\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py\", line 401, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py\", line 395, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/base.py\", line 330, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/base.py\", line 371, in execute\r\n output = self.handle(*args, **options)\r\n File \"/code/demo/management/commands/setup_demo_template_db.py\", line 15, in handle\r\n _setup_demo_template_db(*args, **options)\r\nTypeError: _setup_demo_template_db() got an unexpected keyword argument 'verbosity'\r\n```\r\n\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\nYou should be able to run the command listed above successfully in the `dev` environment.\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\n\r\nStart the dev environment with a fresh docker state. Run the above command.\r\n\r\n## Additional context\r\n<!-- Add any other context about the problem or screenshots here. 
-->\r\n\r\nThe problem is in line 15 of `demo/management/commands/setup_demo_template.py`.\n", "before_files": [{"content": "from sqlalchemy import text\n\nfrom django.conf import settings\nfrom django.core.management import BaseCommand\n\nfrom db.install import install_mathesar\nfrom demo.install.datasets import load_datasets\nfrom mathesar.database.base import create_mathesar_engine\n\n\nclass Command(BaseCommand):\n help = 'Initialize the demo template database.'\n\n def handle(self, *args, **options):\n _setup_demo_template_db(*args, **options)\n\n\ndef _setup_demo_template_db():\n print(\"Initializing demo template database...\")\n\n template_db_name = settings.MATHESAR_DEMO_TEMPLATE\n root_engine = create_mathesar_engine(settings.DATABASES[\"default\"][\"NAME\"])\n with root_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f\"DROP DATABASE IF EXISTS {template_db_name} WITH (FORCE)\"))\n root_engine.dispose()\n install_mathesar(\n database_name=template_db_name,\n username=settings.DATABASES[\"default\"][\"USER\"],\n password=settings.DATABASES[\"default\"][\"PASSWORD\"],\n hostname=settings.DATABASES[\"default\"][\"HOST\"],\n port=settings.DATABASES[\"default\"][\"PORT\"],\n skip_confirm=True\n )\n user_engine = create_mathesar_engine(template_db_name)\n load_datasets(user_engine)\n user_engine.dispose()\n", "path": "demo/management/commands/setup_demo_template_db.py"}]} | 1,349 | 115 |
gh_patches_debug_15065 | rasdani/github-patches | git_diff | mkdocs__mkdocs-1986 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Nested dot folders copied upon build
Sometimes it's necessary to include files in a theme which MkDocs should conditionally include in the templates, but not copy to the `site` directory when the documentation is built. As discussed in #1980, MkDocs will ignore files that start with a `.`, but this only applies to top-level directories. It would be great if MkDocs could exclude nested dot directories from the build.
A concrete example: the next version of the Material theme will bundle the FontAwesome files as SVGs for inlining during build time, but MkDocs copies all of them to the `site` directory. The folder structure is:
```
assets/images/icons/fontawesome/...
```
The current workaround is to add a `*.html` suffix to the files bundled with the theme, resulting in a `*.svg.html` extension for each of the 1,500 icons. While this works, it's semantically incorrect.
If MkDocs would ignore nested dot directories, we could just use:
```
assets/images/icons/.fontawesome/...
```
As pointed out by @waylan, the code in question is here:
https://github.com/mkdocs/mkdocs/blob/2fca717794c0c2e581b8ba17149bc292edeb83e4/mkdocs/structure/files.py#L67-L69
I'm not proficient in Python, but since this looks like a glob implementation, I would guess that `**/.*` would solve the problem
</issue>
<code>
[start of mkdocs/structure/files.py]
1 import fnmatch
2 import os
3 import logging
4 from functools import cmp_to_key
5 from urllib.parse import quote as urlquote
6
7 from mkdocs import utils
8
9
10 log = logging.getLogger(__name__)
11 log.addFilter(utils.warning_filter)
12
13
14 class Files:
15 """ A collection of File objects. """
16 def __init__(self, files):
17 self._files = files
18 self.src_paths = {file.src_path: file for file in files}
19
20 def __iter__(self):
21 return iter(self._files)
22
23 def __len__(self):
24 return len(self._files)
25
26 def __contains__(self, path):
27 return path in self.src_paths
28
29 def get_file_from_path(self, path):
30 """ Return a File instance with File.src_path equal to path. """
31 return self.src_paths.get(os.path.normpath(path))
32
33 def append(self, file):
34 """ Append file to Files collection. """
35 self._files.append(file)
36 self.src_paths[file.src_path] = file
37
38 def copy_static_files(self, dirty=False):
39 """ Copy static files from source to destination. """
40 for file in self:
41 if not file.is_documentation_page():
42 file.copy_file(dirty)
43
44 def documentation_pages(self):
45 """ Return iterable of all Markdown page file objects. """
46 return [file for file in self if file.is_documentation_page()]
47
48 def static_pages(self):
49 """ Return iterable of all static page file objects. """
50 return [file for file in self if file.is_static_page()]
51
52 def media_files(self):
53 """ Return iterable of all file objects which are not documentation or static pages. """
54 return [file for file in self if file.is_media_file()]
55
56 def javascript_files(self):
57 """ Return iterable of all javascript file objects. """
58 return [file for file in self if file.is_javascript()]
59
60 def css_files(self):
61 """ Return iterable of all CSS file objects. """
62 return [file for file in self if file.is_css()]
63
64 def add_files_from_theme(self, env, config):
65 """ Retrieve static files from Jinja environment and add to collection. """
66 def filter(name):
67 patterns = ['.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml']
68 patterns.extend('*{}'.format(x) for x in utils.markdown_extensions)
69 patterns.extend(config['theme'].static_templates)
70 for pattern in patterns:
71 if fnmatch.fnmatch(name.lower(), pattern):
72 return False
73 return True
74 for path in env.list_templates(filter_func=filter):
75 # Theme files do not override docs_dir files
76 path = os.path.normpath(path)
77 if path not in self:
78 for dir in config['theme'].dirs:
79 # Find the first theme dir which contains path
80 if os.path.isfile(os.path.join(dir, path)):
81 self.append(File(path, dir, config['site_dir'], config['use_directory_urls']))
82 break
83
84
85 class File:
86 """
87 A MkDocs File object.
88
89 Points to the source and destination locations of a file.
90
91 The `path` argument must be a path that exists relative to `src_dir`.
92
93 The `src_dir` and `dest_dir` must be absolute paths on the local file system.
94
95 The `use_directory_urls` argument controls how destination paths are generated. If `False`, a Markdown file is
96 mapped to an HTML file of the same name (the file extension is changed to `.html`). If True, a Markdown file is
97 mapped to an HTML index file (`index.html`) nested in a directory using the "name" of the file in `path`. The
98 `use_directory_urls` argument has no effect on non-Markdown files.
99
100 File objects have the following properties, which are Unicode strings:
101
102 File.src_path
103 The pure path of the source file relative to the source directory.
104
105 File.abs_src_path
106 The absolute concrete path of the source file.
107
108 File.dest_path
109 The pure path of the destination file relative to the destination directory.
110
111 File.abs_dest_path
112 The absolute concrete path of the destination file.
113
114 File.url
115 The url of the destination file relative to the destination directory as a string.
116 """
117 def __init__(self, path, src_dir, dest_dir, use_directory_urls):
118 self.page = None
119 self.src_path = os.path.normpath(path)
120 self.abs_src_path = os.path.normpath(os.path.join(src_dir, self.src_path))
121 self.name = self._get_stem()
122 self.dest_path = self._get_dest_path(use_directory_urls)
123 self.abs_dest_path = os.path.normpath(os.path.join(dest_dir, self.dest_path))
124 self.url = self._get_url(use_directory_urls)
125
126 def __eq__(self, other):
127
128 def sub_dict(d):
129 return {key: value for key, value in d.items() if key in ['src_path', 'abs_src_path', 'url']}
130
131 return (isinstance(other, self.__class__) and sub_dict(self.__dict__) == sub_dict(other.__dict__))
132
133 def __ne__(self, other):
134 return not self.__eq__(other)
135
136 def _get_stem(self):
137 """ Return the name of the file without it's extension. """
138 filename = os.path.basename(self.src_path)
139 stem, ext = os.path.splitext(filename)
140 return 'index' if stem in ('index', 'README') else stem
141
142 def _get_dest_path(self, use_directory_urls):
143 """ Return destination path based on source path. """
144 if self.is_documentation_page():
145 if use_directory_urls:
146 parent, filename = os.path.split(self.src_path)
147 if self.name == 'index':
148 # index.md or README.md => index.html
149 return os.path.join(parent, 'index.html')
150 else:
151 # foo.md => foo/index.html
152 return os.path.join(parent, self.name, 'index.html')
153 else:
154 # foo.md => foo.html
155 root, ext = os.path.splitext(self.src_path)
156 return root + '.html'
157 return self.src_path
158
159 def _get_url(self, use_directory_urls):
160 """ Return url based in destination path. """
161 url = self.dest_path.replace(os.path.sep, '/')
162 dirname, filename = os.path.split(url)
163 if use_directory_urls and filename == 'index.html':
164 if dirname == '':
165 url = '.'
166 else:
167 url = dirname + '/'
168 return urlquote(url)
169
170 def url_relative_to(self, other):
171 """ Return url for file relative to other file. """
172 return utils.get_relative_url(self.url, other.url if isinstance(other, File) else other)
173
174 def copy_file(self, dirty=False):
175 """ Copy source file to destination, ensuring parent directories exist. """
176 if dirty and not self.is_modified():
177 log.debug("Skip copying unmodified file: '{}'".format(self.src_path))
178 else:
179 log.debug("Copying media file: '{}'".format(self.src_path))
180 utils.copy_file(self.abs_src_path, self.abs_dest_path)
181
182 def is_modified(self):
183 if os.path.isfile(self.abs_dest_path):
184 return os.path.getmtime(self.abs_dest_path) < os.path.getmtime(self.abs_src_path)
185 return True
186
187 def is_documentation_page(self):
188 """ Return True if file is a Markdown page. """
189 return os.path.splitext(self.src_path)[1] in utils.markdown_extensions
190
191 def is_static_page(self):
192 """ Return True if file is a static page (html, xml, json). """
193 return os.path.splitext(self.src_path)[1] in (
194 '.html',
195 '.htm',
196 '.xml',
197 '.json',
198 )
199
200 def is_media_file(self):
201 """ Return True if file is not a documentation or static page. """
202 return not (self.is_documentation_page() or self.is_static_page())
203
204 def is_javascript(self):
205 """ Return True if file is a JavaScript file. """
206 return os.path.splitext(self.src_path)[1] in (
207 '.js',
208 '.javascript',
209 )
210
211 def is_css(self):
212 """ Return True if file is a CSS file. """
213 return os.path.splitext(self.src_path)[1] in (
214 '.css',
215 )
216
217
218 def get_files(config):
219 """ Walk the `docs_dir` and return a Files collection. """
220 files = []
221 exclude = ['.*', '/templates']
222
223 for source_dir, dirnames, filenames in os.walk(config['docs_dir'], followlinks=True):
224 relative_dir = os.path.relpath(source_dir, config['docs_dir'])
225
226 for dirname in list(dirnames):
227 path = os.path.normpath(os.path.join(relative_dir, dirname))
228 # Skip any excluded directories
229 if _filter_paths(basename=dirname, path=path, is_dir=True, exclude=exclude):
230 dirnames.remove(dirname)
231 dirnames.sort()
232
233 for filename in _sort_files(filenames):
234 path = os.path.normpath(os.path.join(relative_dir, filename))
235 # Skip any excluded files
236 if _filter_paths(basename=filename, path=path, is_dir=False, exclude=exclude):
237 continue
238 # Skip README.md if an index file also exists in dir
239 if filename.lower() == 'readme.md' and 'index.md' in filenames:
240 log.warning("Both index.md and readme.md found. Skipping readme.md from {}".format(source_dir))
241 continue
242 files.append(File(path, config['docs_dir'], config['site_dir'], config['use_directory_urls']))
243
244 return Files(files)
245
246
247 def _sort_files(filenames):
248 """ Always sort `index` or `README` as first filename in list. """
249
250 def compare(x, y):
251 if x == y:
252 return 0
253 if os.path.splitext(y)[0] in ['index', 'README']:
254 return 1
255 if os.path.splitext(x)[0] in ['index', 'README'] or x < y:
256 return -1
257 return 1
258
259 return sorted(filenames, key=cmp_to_key(compare))
260
261
262 def _filter_paths(basename, path, is_dir, exclude):
263 """ .gitignore style file filtering. """
264 for item in exclude:
265 # Items ending in '/' apply only to directories.
266 if item.endswith('/') and not is_dir:
267 continue
268 # Items starting with '/' apply to the whole path.
269 # In any other cases just the basename is used.
270 match = path if item.startswith('/') else basename
271 if fnmatch.fnmatch(match, item.strip('/')):
272 return True
273 return False
274
[end of mkdocs/structure/files.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mkdocs/structure/files.py b/mkdocs/structure/files.py
--- a/mkdocs/structure/files.py
+++ b/mkdocs/structure/files.py
@@ -64,7 +64,8 @@
def add_files_from_theme(self, env, config):
""" Retrieve static files from Jinja environment and add to collection. """
def filter(name):
- patterns = ['.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml']
+ # '.*' filters dot files/dirs at root level whereas '*/.*' filters nested levels
+ patterns = ['.*', '*/.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml']
patterns.extend('*{}'.format(x) for x in utils.markdown_extensions)
patterns.extend(config['theme'].static_templates)
for pattern in patterns:
| {"golden_diff": "diff --git a/mkdocs/structure/files.py b/mkdocs/structure/files.py\n--- a/mkdocs/structure/files.py\n+++ b/mkdocs/structure/files.py\n@@ -64,7 +64,8 @@\n def add_files_from_theme(self, env, config):\n \"\"\" Retrieve static files from Jinja environment and add to collection. \"\"\"\n def filter(name):\n- patterns = ['.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml']\n+ # '.*' filters dot files/dirs at root level whereas '*/.*' filters nested levels\n+ patterns = ['.*', '*/.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml']\n patterns.extend('*{}'.format(x) for x in utils.markdown_extensions)\n patterns.extend(config['theme'].static_templates)\n for pattern in patterns:\n", "issue": "Nested dot folders copied upon build\nSometimes it's necessary to include files in a theme which MkDocs should conditionally include in the templates, but not copy to the `site` directory when the documentation is built. As discussed in #1980, MkDocs will ignore files that start with a `.`, but this only applies to top-level directories. It would be great if MkDocs could exclude nested dot directories from the build.\r\n\r\nA concrete example: the next version of the Material theme will bundle the FontAwesome files as SVGs for inlining during build time, but MkDocs copies all of them to the `site` directory. The folder structure is:\r\n\r\n```\r\nassets/images/icons/fontawesome/...\r\n```\r\n\r\nThe current workaround is to add a `*.html` to the files bundles with the theme, resulting in a `*.svg.html` extension for each of the 1.500 icons. While this works, it's semantically incorrect.\r\n\r\nIf MkDocs would ignore nested dot directories, we could just use:\r\n\r\n```\r\nassets/images/icons/.fontawesome/...\r\n```\r\n\r\nAs pointed out by @waylan, the code in question is here:\r\n\r\nhttps://github.com/mkdocs/mkdocs/blob/2fca717794c0c2e581b8ba17149bc292edeb83e4/mkdocs/structure/files.py#L67-L69\r\n\r\nI'm not proficient in Python, but since this looks like a glob implementation, I would guess that `**/.*` would solve the problem\n", "before_files": [{"content": "import fnmatch\nimport os\nimport logging\nfrom functools import cmp_to_key\nfrom urllib.parse import quote as urlquote\n\nfrom mkdocs import utils\n\n\nlog = logging.getLogger(__name__)\nlog.addFilter(utils.warning_filter)\n\n\nclass Files:\n \"\"\" A collection of File objects. \"\"\"\n def __init__(self, files):\n self._files = files\n self.src_paths = {file.src_path: file for file in files}\n\n def __iter__(self):\n return iter(self._files)\n\n def __len__(self):\n return len(self._files)\n\n def __contains__(self, path):\n return path in self.src_paths\n\n def get_file_from_path(self, path):\n \"\"\" Return a File instance with File.src_path equal to path. \"\"\"\n return self.src_paths.get(os.path.normpath(path))\n\n def append(self, file):\n \"\"\" Append file to Files collection. \"\"\"\n self._files.append(file)\n self.src_paths[file.src_path] = file\n\n def copy_static_files(self, dirty=False):\n \"\"\" Copy static files from source to destination. \"\"\"\n for file in self:\n if not file.is_documentation_page():\n file.copy_file(dirty)\n\n def documentation_pages(self):\n \"\"\" Return iterable of all Markdown page file objects. \"\"\"\n return [file for file in self if file.is_documentation_page()]\n\n def static_pages(self):\n \"\"\" Return iterable of all static page file objects. 
\"\"\"\n return [file for file in self if file.is_static_page()]\n\n def media_files(self):\n \"\"\" Return iterable of all file objects which are not documentation or static pages. \"\"\"\n return [file for file in self if file.is_media_file()]\n\n def javascript_files(self):\n \"\"\" Return iterable of all javascript file objects. \"\"\"\n return [file for file in self if file.is_javascript()]\n\n def css_files(self):\n \"\"\" Return iterable of all CSS file objects. \"\"\"\n return [file for file in self if file.is_css()]\n\n def add_files_from_theme(self, env, config):\n \"\"\" Retrieve static files from Jinja environment and add to collection. \"\"\"\n def filter(name):\n patterns = ['.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml']\n patterns.extend('*{}'.format(x) for x in utils.markdown_extensions)\n patterns.extend(config['theme'].static_templates)\n for pattern in patterns:\n if fnmatch.fnmatch(name.lower(), pattern):\n return False\n return True\n for path in env.list_templates(filter_func=filter):\n # Theme files do not override docs_dir files\n path = os.path.normpath(path)\n if path not in self:\n for dir in config['theme'].dirs:\n # Find the first theme dir which contains path\n if os.path.isfile(os.path.join(dir, path)):\n self.append(File(path, dir, config['site_dir'], config['use_directory_urls']))\n break\n\n\nclass File:\n \"\"\"\n A MkDocs File object.\n\n Points to the source and destination locations of a file.\n\n The `path` argument must be a path that exists relative to `src_dir`.\n\n The `src_dir` and `dest_dir` must be absolute paths on the local file system.\n\n The `use_directory_urls` argument controls how destination paths are generated. If `False`, a Markdown file is\n mapped to an HTML file of the same name (the file extension is changed to `.html`). If True, a Markdown file is\n mapped to an HTML index file (`index.html`) nested in a directory using the \"name\" of the file in `path`. The\n `use_directory_urls` argument has no effect on non-Markdown files.\n\n File objects have the following properties, which are Unicode strings:\n\n File.src_path\n The pure path of the source file relative to the source directory.\n\n File.abs_src_path\n The absolute concrete path of the source file.\n\n File.dest_path\n The pure path of the destination file relative to the destination directory.\n\n File.abs_dest_path\n The absolute concrete path of the destination file.\n\n File.url\n The url of the destination file relative to the destination directory as a string.\n \"\"\"\n def __init__(self, path, src_dir, dest_dir, use_directory_urls):\n self.page = None\n self.src_path = os.path.normpath(path)\n self.abs_src_path = os.path.normpath(os.path.join(src_dir, self.src_path))\n self.name = self._get_stem()\n self.dest_path = self._get_dest_path(use_directory_urls)\n self.abs_dest_path = os.path.normpath(os.path.join(dest_dir, self.dest_path))\n self.url = self._get_url(use_directory_urls)\n\n def __eq__(self, other):\n\n def sub_dict(d):\n return {key: value for key, value in d.items() if key in ['src_path', 'abs_src_path', 'url']}\n\n return (isinstance(other, self.__class__) and sub_dict(self.__dict__) == sub_dict(other.__dict__))\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def _get_stem(self):\n \"\"\" Return the name of the file without it's extension. 
\"\"\"\n filename = os.path.basename(self.src_path)\n stem, ext = os.path.splitext(filename)\n return 'index' if stem in ('index', 'README') else stem\n\n def _get_dest_path(self, use_directory_urls):\n \"\"\" Return destination path based on source path. \"\"\"\n if self.is_documentation_page():\n if use_directory_urls:\n parent, filename = os.path.split(self.src_path)\n if self.name == 'index':\n # index.md or README.md => index.html\n return os.path.join(parent, 'index.html')\n else:\n # foo.md => foo/index.html\n return os.path.join(parent, self.name, 'index.html')\n else:\n # foo.md => foo.html\n root, ext = os.path.splitext(self.src_path)\n return root + '.html'\n return self.src_path\n\n def _get_url(self, use_directory_urls):\n \"\"\" Return url based in destination path. \"\"\"\n url = self.dest_path.replace(os.path.sep, '/')\n dirname, filename = os.path.split(url)\n if use_directory_urls and filename == 'index.html':\n if dirname == '':\n url = '.'\n else:\n url = dirname + '/'\n return urlquote(url)\n\n def url_relative_to(self, other):\n \"\"\" Return url for file relative to other file. \"\"\"\n return utils.get_relative_url(self.url, other.url if isinstance(other, File) else other)\n\n def copy_file(self, dirty=False):\n \"\"\" Copy source file to destination, ensuring parent directories exist. \"\"\"\n if dirty and not self.is_modified():\n log.debug(\"Skip copying unmodified file: '{}'\".format(self.src_path))\n else:\n log.debug(\"Copying media file: '{}'\".format(self.src_path))\n utils.copy_file(self.abs_src_path, self.abs_dest_path)\n\n def is_modified(self):\n if os.path.isfile(self.abs_dest_path):\n return os.path.getmtime(self.abs_dest_path) < os.path.getmtime(self.abs_src_path)\n return True\n\n def is_documentation_page(self):\n \"\"\" Return True if file is a Markdown page. \"\"\"\n return os.path.splitext(self.src_path)[1] in utils.markdown_extensions\n\n def is_static_page(self):\n \"\"\" Return True if file is a static page (html, xml, json). \"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.html',\n '.htm',\n '.xml',\n '.json',\n )\n\n def is_media_file(self):\n \"\"\" Return True if file is not a documentation or static page. \"\"\"\n return not (self.is_documentation_page() or self.is_static_page())\n\n def is_javascript(self):\n \"\"\" Return True if file is a JavaScript file. \"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.js',\n '.javascript',\n )\n\n def is_css(self):\n \"\"\" Return True if file is a CSS file. \"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.css',\n )\n\n\ndef get_files(config):\n \"\"\" Walk the `docs_dir` and return a Files collection. \"\"\"\n files = []\n exclude = ['.*', '/templates']\n\n for source_dir, dirnames, filenames in os.walk(config['docs_dir'], followlinks=True):\n relative_dir = os.path.relpath(source_dir, config['docs_dir'])\n\n for dirname in list(dirnames):\n path = os.path.normpath(os.path.join(relative_dir, dirname))\n # Skip any excluded directories\n if _filter_paths(basename=dirname, path=path, is_dir=True, exclude=exclude):\n dirnames.remove(dirname)\n dirnames.sort()\n\n for filename in _sort_files(filenames):\n path = os.path.normpath(os.path.join(relative_dir, filename))\n # Skip any excluded files\n if _filter_paths(basename=filename, path=path, is_dir=False, exclude=exclude):\n continue\n # Skip README.md if an index file also exists in dir\n if filename.lower() == 'readme.md' and 'index.md' in filenames:\n log.warning(\"Both index.md and readme.md found. 
Skipping readme.md from {}\".format(source_dir))\n continue\n files.append(File(path, config['docs_dir'], config['site_dir'], config['use_directory_urls']))\n\n return Files(files)\n\n\ndef _sort_files(filenames):\n \"\"\" Always sort `index` or `README` as first filename in list. \"\"\"\n\n def compare(x, y):\n if x == y:\n return 0\n if os.path.splitext(y)[0] in ['index', 'README']:\n return 1\n if os.path.splitext(x)[0] in ['index', 'README'] or x < y:\n return -1\n return 1\n\n return sorted(filenames, key=cmp_to_key(compare))\n\n\ndef _filter_paths(basename, path, is_dir, exclude):\n \"\"\" .gitignore style file filtering. \"\"\"\n for item in exclude:\n # Items ending in '/' apply only to directories.\n if item.endswith('/') and not is_dir:\n continue\n # Items starting with '/' apply to the whole path.\n # In any other cases just the basename is used.\n match = path if item.startswith('/') else basename\n if fnmatch.fnmatch(match, item.strip('/')):\n return True\n return False\n", "path": "mkdocs/structure/files.py"}]} | 3,855 | 197 |
gh_patches_debug_5188 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-104 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Download Data Fails if Content Length Not Defined in Header
## 🐛 Bug
When I try to download a zip file using download_data from flash.core.data, it fails because the response header does not contain a value for 'Content-Length'; this should be checked for and handled in the code.
### To Reproduce
Steps to reproduce the behavior:
KeyError Traceback (most recent call last)
<ipython-input-7-aa10e89f3a8e> in <module>()
1 # 1. Download the data
----> 2 download_data("https://github.com/karoldvl/ESC-50/archive/master.zip", 'data/')
2 frames
/content/gdrive/MyDrive/lightning-flash/flash/core/data/utils.py in download_data(url, path)
75
76 """
---> 77 download_file(url, path)
78
79
/content/gdrive/MyDrive/lightning-flash/flash/core/data/utils.py in download_file(url, path, verbose)
36 local_filename = os.path.join(path, url.split('/')[-1])
37 r = requests.get(url, stream=True)
---> 38 file_size = int(r.headers['Content-Length'])
39 chunk = 1
40 chunk_size = 1024
/usr/local/lib/python3.6/dist-packages/requests/structures.py in __getitem__(self, key)
52
53 def __getitem__(self, key):
---> 54 return self._store[key.lower()][1]
55
56 def __delitem__(self, key):
KeyError: 'content-length'
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
#### Code sample
<!-- Ideally attach a minimal code sample to reproduce the decried issue.
Minimal means having the shortest code but still preserving the bug. -->
```python
import flash
from flash.core.data import download_data
download_data("https://github.com/karoldvl/ESC-50/archive/master.zip", 'data/')
```
### Expected behavior
File downloads and extracts ESC-50 data into datasets folder
### Environment
Default Collab Configuration
### Additional context
<!-- Add any other context about the problem here. -->
</issue>
<code>
[start of flash/core/data/utils.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os.path
16 import zipfile
17 from typing import Any, Type
18
19 import requests
20 import torch
21 from tqdm.auto import tqdm as tq
22
23
24 # Code taken from: https://gist.github.com/ruxi/5d6803c116ec1130d484a4ab8c00c603
25 # __author__ = "github.com/ruxi"
26 # __license__ = "MIT"
27 def download_file(url: str, path: str, verbose: bool = False) -> None:
28 """
29 Download file with progressbar
30
31 Usage:
32 download_file('http://web4host.net/5MB.zip')
33 """
34 if not os.path.exists(path):
35 os.makedirs(path)
36 local_filename = os.path.join(path, url.split('/')[-1])
37 r = requests.get(url, stream=True)
38 file_size = int(r.headers['Content-Length'])
39 chunk = 1
40 chunk_size = 1024
41 num_bars = int(file_size / chunk_size)
42 if verbose:
43 print(dict(file_size=file_size))
44 print(dict(num_bars=num_bars))
45
46 if not os.path.exists(local_filename):
47 with open(local_filename, 'wb') as fp:
48 for chunk in tq(
49 r.iter_content(chunk_size=chunk_size),
50 total=num_bars,
51 unit='KB',
52 desc=local_filename,
53 leave=True # progressbar stays
54 ):
55 fp.write(chunk) # type: ignore
56
57 if '.zip' in local_filename:
58 if os.path.exists(local_filename):
59 with zipfile.ZipFile(local_filename, 'r') as zip_ref:
60 zip_ref.extractall(path)
61
62
63 def download_data(url: str, path: str = "data/") -> None:
64 """
65 Downloads data automatically from the given url to the path. Defaults to data/ for the path.
66 Automatically handles .csv, .zip
67
68 Example::
69
70 from flash import download_data
71
72 Args:
73 url: path
74 path: local
75
76 """
77 download_file(url, path)
78
79
80 def _contains_any_tensor(value: Any, dtype: Type = torch.Tensor) -> bool:
81 # TODO: we should refactor FlashDatasetFolder to better integrate
82 # with DataPipeline. That way, we wouldn't need this check.
83 # This is because we are running transforms in both places.
84 if isinstance(value, dtype):
85 return True
86 if isinstance(value, (list, tuple)):
87 return any(_contains_any_tensor(v, dtype=dtype) for v in value)
88 elif isinstance(value, dict):
89 return any(_contains_any_tensor(v, dtype=dtype) for v in value.values())
90 return False
91
[end of flash/core/data/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flash/core/data/utils.py b/flash/core/data/utils.py
--- a/flash/core/data/utils.py
+++ b/flash/core/data/utils.py
@@ -35,7 +35,7 @@
os.makedirs(path)
local_filename = os.path.join(path, url.split('/')[-1])
r = requests.get(url, stream=True)
- file_size = int(r.headers['Content-Length'])
+ file_size = int(r.headers['Content-Length']) if 'Content-Length' in r.headers else 0
chunk = 1
chunk_size = 1024
num_bars = int(file_size / chunk_size)
| {"golden_diff": "diff --git a/flash/core/data/utils.py b/flash/core/data/utils.py\n--- a/flash/core/data/utils.py\n+++ b/flash/core/data/utils.py\n@@ -35,7 +35,7 @@\n os.makedirs(path)\n local_filename = os.path.join(path, url.split('/')[-1])\n r = requests.get(url, stream=True)\n- file_size = int(r.headers['Content-Length'])\n+ file_size = int(r.headers['Content-Length']) if 'Content-Length' in r.headers else 0\n chunk = 1\n chunk_size = 1024\n num_bars = int(file_size / chunk_size)\n", "issue": "Download Data Fails if Content Length Not Defined in Header\n## \ud83d\udc1b Bug\r\n\r\nWhen I try to download a zip file using download_data from flash.core.data it fails because the response header does not contain a value for 'Content Length' this should be check for and handled in the code. \r\n\r\n### To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\nKeyError Traceback (most recent call last)\r\n<ipython-input-7-aa10e89f3a8e> in <module>()\r\n 1 # 1. Download the data\r\n----> 2 download_data(\"https://github.com/karoldvl/ESC-50/archive/master.zip\", 'data/')\r\n\r\n2 frames\r\n/content/gdrive/MyDrive/lightning-flash/flash/core/data/utils.py in download_data(url, path)\r\n 75 \r\n 76 \"\"\"\r\n---> 77 download_file(url, path)\r\n 78 \r\n 79 \r\n\r\n/content/gdrive/MyDrive/lightning-flash/flash/core/data/utils.py in download_file(url, path, verbose)\r\n 36 local_filename = os.path.join(path, url.split('/')[-1])\r\n 37 r = requests.get(url, stream=True)\r\n---> 38 file_size = int(r.headers['Content-Length'])\r\n 39 chunk = 1\r\n 40 chunk_size = 1024\r\n\r\n/usr/local/lib/python3.6/dist-packages/requests/structures.py in __getitem__(self, key)\r\n 52 \r\n 53 def __getitem__(self, key):\r\n---> 54 return self._store[key.lower()][1]\r\n 55 \r\n 56 def __delitem__(self, key):\r\n\r\nKeyError: 'content-length'\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n\r\n#### Code sample\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue.\r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n```python\r\nimport flash\r\nfrom flash.core.data import download_data\r\ndownload_data(\"https://github.com/karoldvl/ESC-50/archive/master.zip\", 'data/')\r\n```\r\n\r\n### Expected behavior\r\n\r\nFile downloads and extracts ESC-50 data into datasets folder\r\n\r\n### Environment\r\n\r\nDefault Collab Configuration \r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. 
-->\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os.path\nimport zipfile\nfrom typing import Any, Type\n\nimport requests\nimport torch\nfrom tqdm.auto import tqdm as tq\n\n\n# Code taken from: https://gist.github.com/ruxi/5d6803c116ec1130d484a4ab8c00c603\n# __author__ = \"github.com/ruxi\"\n# __license__ = \"MIT\"\ndef download_file(url: str, path: str, verbose: bool = False) -> None:\n \"\"\"\n Download file with progressbar\n\n Usage:\n download_file('http://web4host.net/5MB.zip')\n \"\"\"\n if not os.path.exists(path):\n os.makedirs(path)\n local_filename = os.path.join(path, url.split('/')[-1])\n r = requests.get(url, stream=True)\n file_size = int(r.headers['Content-Length'])\n chunk = 1\n chunk_size = 1024\n num_bars = int(file_size / chunk_size)\n if verbose:\n print(dict(file_size=file_size))\n print(dict(num_bars=num_bars))\n\n if not os.path.exists(local_filename):\n with open(local_filename, 'wb') as fp:\n for chunk in tq(\n r.iter_content(chunk_size=chunk_size),\n total=num_bars,\n unit='KB',\n desc=local_filename,\n leave=True # progressbar stays\n ):\n fp.write(chunk) # type: ignore\n\n if '.zip' in local_filename:\n if os.path.exists(local_filename):\n with zipfile.ZipFile(local_filename, 'r') as zip_ref:\n zip_ref.extractall(path)\n\n\ndef download_data(url: str, path: str = \"data/\") -> None:\n \"\"\"\n Downloads data automatically from the given url to the path. Defaults to data/ for the path.\n Automatically handles .csv, .zip\n\n Example::\n\n from flash import download_data\n\n Args:\n url: path\n path: local\n\n \"\"\"\n download_file(url, path)\n\n\ndef _contains_any_tensor(value: Any, dtype: Type = torch.Tensor) -> bool:\n # TODO: we should refactor FlashDatasetFolder to better integrate\n # with DataPipeline. That way, we wouldn't need this check.\n # This is because we are running transforms in both places.\n if isinstance(value, dtype):\n return True\n if isinstance(value, (list, tuple)):\n return any(_contains_any_tensor(v, dtype=dtype) for v in value)\n elif isinstance(value, dict):\n return any(_contains_any_tensor(v, dtype=dtype) for v in value.values())\n return False\n", "path": "flash/core/data/utils.py"}]} | 1,936 | 141 |
gh_patches_debug_7848 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-977 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Maptopicprio: Polygon may not be set
When I try to create a maptopic in the dashboard, it is not guaranteed that a polygon is already set. The map for setting a point therefore fails to display.
</issue>
<code>
[start of meinberlin/apps/maptopicprio/dashboard.py]
1 from django.urls import reverse
2 from django.utils.translation import ugettext_lazy as _
3
4 from meinberlin.apps.dashboard2 import DashboardComponent
5 from meinberlin.apps.dashboard2 import components
6
7 from . import models
8 from . import views
9
10
11 class MapTopicEditComponent(DashboardComponent):
12 identifier = 'map_topic_edit'
13 weight = 20
14 label = _('Places')
15
16 def is_effective(self, module):
17 module_app = module.phases[0].content().app
18 return module_app == 'meinberlin_maptopicprio'
19
20 def get_progress(self, module):
21 if models.MapTopic.objects.filter(module=module).exists():
22 return 1, 1
23 return 0, 1
24
25 def get_base_url(self, module):
26 return reverse('a4dashboard:maptopic-list', kwargs={
27 'module_slug': module.slug
28 })
29
30 def get_urls(self):
31 return [
32 (r'^maptopics/module/(?P<module_slug>[-\w_]+)/$',
33 views.MapTopicListDashboardView.as_view(component=self),
34 'maptopic-list'),
35 (r'^maptopics/create/module/(?P<module_slug>[-\w_]+)/$',
36 views.MapTopicCreateView.as_view(component=self),
37 'maptopic-create'),
38 (r'^maptopics/(?P<slug>[-\w_]+)/update/$',
39 views.MapTopicUpdateView.as_view(component=self),
40 'maptopic-update'),
41 (r'^maptopics/(?P<slug>[-\w_]+)/delete/$',
42 views.MapTopicDeleteView.as_view(component=self),
43 'maptopic-delete')
44 ]
45
46
47 components.register_module(MapTopicEditComponent())
48
[end of meinberlin/apps/maptopicprio/dashboard.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/maptopicprio/dashboard.py b/meinberlin/apps/maptopicprio/dashboard.py
--- a/meinberlin/apps/maptopicprio/dashboard.py
+++ b/meinberlin/apps/maptopicprio/dashboard.py
@@ -15,7 +15,12 @@
def is_effective(self, module):
module_app = module.phases[0].content().app
- return module_app == 'meinberlin_maptopicprio'
+ if module_app != 'meinberlin_maptopicprio':
+ return False
+ elif module.settings_instance.polygon == '':
+ return False
+ else:
+ return True
def get_progress(self, module):
if models.MapTopic.objects.filter(module=module).exists():
| {"golden_diff": "diff --git a/meinberlin/apps/maptopicprio/dashboard.py b/meinberlin/apps/maptopicprio/dashboard.py\n--- a/meinberlin/apps/maptopicprio/dashboard.py\n+++ b/meinberlin/apps/maptopicprio/dashboard.py\n@@ -15,7 +15,12 @@\n \n def is_effective(self, module):\n module_app = module.phases[0].content().app\n- return module_app == 'meinberlin_maptopicprio'\n+ if module_app != 'meinberlin_maptopicprio':\n+ return False\n+ elif module.settings_instance.polygon == '':\n+ return False\n+ else:\n+ return True\n \n def get_progress(self, module):\n if models.MapTopic.objects.filter(module=module).exists():\n", "issue": "Maptopicprio: Polygon may not be set\nWhen I try to create a maptopic in the dashboard, it is not guaranteed that a polygon is already set. The map for setting a point therefore fails to display.\n", "before_files": [{"content": "from django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom meinberlin.apps.dashboard2 import DashboardComponent\nfrom meinberlin.apps.dashboard2 import components\n\nfrom . import models\nfrom . import views\n\n\nclass MapTopicEditComponent(DashboardComponent):\n identifier = 'map_topic_edit'\n weight = 20\n label = _('Places')\n\n def is_effective(self, module):\n module_app = module.phases[0].content().app\n return module_app == 'meinberlin_maptopicprio'\n\n def get_progress(self, module):\n if models.MapTopic.objects.filter(module=module).exists():\n return 1, 1\n return 0, 1\n\n def get_base_url(self, module):\n return reverse('a4dashboard:maptopic-list', kwargs={\n 'module_slug': module.slug\n })\n\n def get_urls(self):\n return [\n (r'^maptopics/module/(?P<module_slug>[-\\w_]+)/$',\n views.MapTopicListDashboardView.as_view(component=self),\n 'maptopic-list'),\n (r'^maptopics/create/module/(?P<module_slug>[-\\w_]+)/$',\n views.MapTopicCreateView.as_view(component=self),\n 'maptopic-create'),\n (r'^maptopics/(?P<slug>[-\\w_]+)/update/$',\n views.MapTopicUpdateView.as_view(component=self),\n 'maptopic-update'),\n (r'^maptopics/(?P<slug>[-\\w_]+)/delete/$',\n views.MapTopicDeleteView.as_view(component=self),\n 'maptopic-delete')\n ]\n\n\ncomponents.register_module(MapTopicEditComponent())\n", "path": "meinberlin/apps/maptopicprio/dashboard.py"}]} | 1,040 | 177 |
gh_patches_debug_12751 | rasdani/github-patches | git_diff | bridgecrewio__checkov-2372 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
crash introduce by https://github.com/bridgecrewio/checkov/pull/2356
**Describe the issue**
```
checkov -d test1/
```
**Examples**
test1/job.tf
```hcl
resource "aws_batch_job_definition" "foobar" {
name = "foobar"
type = "container"
container_properties = file("${path.module}/job_definition.json")
}
```
**Exception Trace**
Please share the trace for the exception and all relevant output by checkov.
To maximize the understanding, please run checkov with LOG_LEVEL set to debug
as follows:
```sh
2022-02-08 16:55:00,816 [MainThread ] [DEBUG] Checkov version: 2.0.809
2022-02-08 16:55:00,816 [MainThread ] [DEBUG] Python executable: /usr/bin/python3
2022-02-08 16:55:00,816 [MainThread ] [DEBUG] Python version: 3.10.2 (main, Jan 17 2022, 00:00:00) [GCC 11.2.1 20211203 (Red Hat 11.2.1-7)]
2022-02-08 16:55:00,816 [MainThread ] [DEBUG] Checkov executable (argv[0]): /home/foobar/.local/bin/checkov
2022-02-08 16:55:00,816 [MainThread ] [DEBUG] Command Line Args: -d test1/
Config File (/home/foobar/git/production/.checkov.yaml):
framework: terraform
baseline: .checkov.baseline
evaluate-variables:True
compact: True
quiet: True
Defaults:
--branch: master
--download-external-modules:False
--external-modules-download-path:.external_modules
--min-cve-severity:none
2022-02-08 16:55:00,817 [MainThread ] [INFO ] Resultant set of frameworks (removing skipped frameworks): terraform
2022-02-08 16:55:00,817 [MainThread ] [DEBUG] terraform_runner declares no system dependency checks required.
2022-02-08 16:55:00,817 [MainThread ] [DEBUG] No API key found. Scanning locally only.
2022-02-08 16:55:01,377 [MainThread ] [DEBUG] Got checkov mappings from Bridgecrew BE
2022-02-08 16:55:01,378 [MainThread ] [INFO ] Scanning root folder and producing fresh tf_definitions and context
2022-02-08 16:55:01,378 [MainThread ] [INFO ] Parsing HCL files in source dir
2022-02-08 16:55:01,379 [MainThread ] [DEBUG] Parsing /home/foobar/git/production/test1/job.tf
2022-02-08 16:55:01,383 [MainThread ] [DEBUG] Module load loop 0
2022-02-08 16:55:01,384 [MainThread ] [INFO ] Building graph from parsed module
2022-02-08 16:55:01,384 [MainThread ] [INFO ] Creating vertices
2022-02-08 16:55:01,384 [MainThread ] [INFO ] Creating edges
2022-02-08 16:55:01,385 [MainThread ] [INFO ] Rendering variables, graph has 1 vertices and 0 edges
2022-02-08 16:55:01,385 [MainThread ] [INFO ] done evaluating edges
2022-02-08 16:55:01,385 [MainThread ] [INFO ] done evaluate_non_rendered_values
2022-02-08 16:55:01,386 [MainThread ] [DEBUG] created context for aws_batch_job_definition foobar
2022-02-08 16:55:01,386 [MainThread ] [DEBUG] Created definitions context
2022-02-08 16:55:01,386 [MainThread ] [DEBUG] Scanning file: /job.tf
2022-02-08 16:55:01,386 [MainThread ] [DEBUG] Running check: Batch job does not define a privileged container on file /job.tf
2022-02-08 16:55:01,386 [MainThread ] [ERROR] Failed to run check: Batch job does not define a privileged container for configuration: {'container_properties': ['file("path.module/job_definition.json")'], 'name': ['foobar'], 'type': ['container']} at file: /job.tf
Traceback (most recent call last):
File "/home/foobar/.local/bin/checkov", line 9, in <module>
sys.exit(run())
File "/home/foobar/.local/lib/python3.10/site-packages/checkov/main.py", line 223, in run
scan_reports = runner_registry.run(root_folder=root_folder, external_checks_dir=external_checks_dir,
File "/home/foobar/.local/lib/python3.10/site-packages/checkov/common/runners/runner_registry.py", line 59, in run
reports = [self.runners[0].run(root_folder, external_checks_dir=external_checks_dir, files=files,
File "/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/runner.py", line 119, in run
self.check_tf_definition(report, root_folder, runner_filter, collect_skip_comments)
File "/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/runner.py", line 212, in check_tf_definition
self.run_all_blocks(definition, self.context, full_file_path, root_folder, report,
File "/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/runner.py", line 222, in run_all_blocks
self.run_block(definition[block_type], definitions_context,
File "/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/runner.py", line 294, in run_block
results = registry.scan(scanned_file, entity, skipped_checks, runner_filter)
File "/home/foobar/.local/lib/python3.10/site-packages/checkov/common/checks/base_check_registry.py", line 121, in scan
result = self.run_check(check, entity_configuration, entity_name, entity_type, scanned_file, skip_info)
File "/home/foobar/.local/lib/python3.10/site-packages/checkov/common/checks/base_check_registry.py", line 135, in run_check
result = check.run(
File "/home/foobar/.local/lib/python3.10/site-packages/checkov/common/checks/base_check.py", line 81, in run
raise e
File "/home/foobar/.local/lib/python3.10/site-packages/checkov/common/checks/base_check.py", line 68, in run
check_result["result"] = self.scan_entity_conf(entity_configuration, entity_type)
File "/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/checks/resource/base_resource_check.py", line 54, in scan_entity_conf
return self.scan_resource_conf(conf)
File "/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py", line 17, in scan_resource_conf
container = json.loads(conf.get("container_properties")[0])
File "/usr/lib64/python3.10/json/__init__.py", line 346, in loads
return _default_decoder.decode(s)
File "/usr/lib64/python3.10/json/decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/lib64/python3.10/json/decoder.py", line 355, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
```
**Desktop (please complete the following information):**
- OS: Fedora 35
- Checkov Version: 2.0.809
**Additional context**
This issue appears from https://github.com/bridgecrewio/checkov/pull/2356
</issue>
<code>
[start of checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py]
1 import json
2 from checkov.common.models.enums import CheckResult, CheckCategories
3 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
4
5
6 class BatchJobIsNotPrivileged(BaseResourceCheck):
7 def __init__(self):
8 name = "Batch job does not define a privileged container"
9 id = "CKV_AWS_210"
10 supported_resources = ['aws_batch_job_definition']
11 categories = [CheckCategories.GENERAL_SECURITY]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def scan_resource_conf(self, conf):
15 if conf.get("container_properties"):
16 if type(conf.get("container_properties")[0]) is str:
17 container = json.loads(conf.get("container_properties")[0])
18 else:
19 container = conf.get("container_properties")[0]
20 if container.get("privileged"):
21 return CheckResult.FAILED
22 return CheckResult.PASSED
23 return CheckResult.UNKNOWN
24
25
26 check = BatchJobIsNotPrivileged()
27
[end of checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py b/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py
--- a/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py
+++ b/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py
@@ -14,7 +14,10 @@
def scan_resource_conf(self, conf):
if conf.get("container_properties"):
if type(conf.get("container_properties")[0]) is str:
- container = json.loads(conf.get("container_properties")[0])
+ try:
+ container = json.loads(conf.get("container_properties")[0])
+ except json.JSONDecodeError as e:
+ return CheckResult.UNKNOWN
else:
container = conf.get("container_properties")[0]
if container.get("privileged"):
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py b/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py\n--- a/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py\n+++ b/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py\n@@ -14,7 +14,10 @@\n def scan_resource_conf(self, conf):\n if conf.get(\"container_properties\"):\n if type(conf.get(\"container_properties\")[0]) is str:\n- container = json.loads(conf.get(\"container_properties\")[0])\n+ try:\n+ container = json.loads(conf.get(\"container_properties\")[0])\n+ except json.JSONDecodeError as e:\n+ return CheckResult.UNKNOWN\n else:\n container = conf.get(\"container_properties\")[0]\n if container.get(\"privileged\"):\n", "issue": "crash introduce by https://github.com/bridgecrewio/checkov/pull/2356\n**Describe the issue**\r\n\r\n```\r\ncheckov -d test1/\r\n```\r\n\r\n**Examples**\r\ntest1/job.tf\r\n```hcl\r\nresource \"aws_batch_job_definition\" \"foobar\" {\r\n name = \"foobar\"\r\n type = \"container\"\r\n container_properties = file(\"${path.module}/job_definition.json\")\r\n}\r\n```\r\n\r\n**Exception Trace**\r\nPlease share the trace for the exception and all relevant output by checkov.\r\nTo maximize the understanding, please run checkov with LOG_LEVEL set to debug\r\nas follows:\r\n```sh\r\n2022-02-08 16:55:00,816 [MainThread ] [DEBUG] Checkov version: 2.0.809\r\n2022-02-08 16:55:00,816 [MainThread ] [DEBUG] Python executable: /usr/bin/python3\r\n2022-02-08 16:55:00,816 [MainThread ] [DEBUG] Python version: 3.10.2 (main, Jan 17 2022, 00:00:00) [GCC 11.2.1 20211203 (Red Hat 11.2.1-7)]\r\n2022-02-08 16:55:00,816 [MainThread ] [DEBUG] Checkov executable (argv[0]): /home/foobar/.local/bin/checkov\r\n2022-02-08 16:55:00,816 [MainThread ] [DEBUG] Command Line Args: -d test1/\r\nConfig File (/home/foobar/git/production/.checkov.yaml):\r\n framework: terraform\r\n baseline: .checkov.baseline\r\n evaluate-variables:True\r\n compact: True\r\n quiet: True\r\nDefaults:\r\n --branch: master\r\n --download-external-modules:False\r\n --external-modules-download-path:.external_modules\r\n --min-cve-severity:none\r\n\r\n2022-02-08 16:55:00,817 [MainThread ] [INFO ] Resultant set of frameworks (removing skipped frameworks): terraform\r\n2022-02-08 16:55:00,817 [MainThread ] [DEBUG] terraform_runner declares no system dependency checks required.\r\n2022-02-08 16:55:00,817 [MainThread ] [DEBUG] No API key found. 
Scanning locally only.\r\n2022-02-08 16:55:01,377 [MainThread ] [DEBUG] Got checkov mappings from Bridgecrew BE\r\n2022-02-08 16:55:01,378 [MainThread ] [INFO ] Scanning root folder and producing fresh tf_definitions and context\r\n2022-02-08 16:55:01,378 [MainThread ] [INFO ] Parsing HCL files in source dir\r\n2022-02-08 16:55:01,379 [MainThread ] [DEBUG] Parsing /home/foobar/git/production/test1/job.tf\r\n2022-02-08 16:55:01,383 [MainThread ] [DEBUG] Module load loop 0\r\n2022-02-08 16:55:01,384 [MainThread ] [INFO ] Building graph from parsed module\r\n2022-02-08 16:55:01,384 [MainThread ] [INFO ] Creating vertices\r\n2022-02-08 16:55:01,384 [MainThread ] [INFO ] Creating edges\r\n2022-02-08 16:55:01,385 [MainThread ] [INFO ] Rendering variables, graph has 1 vertices and 0 edges\r\n2022-02-08 16:55:01,385 [MainThread ] [INFO ] done evaluating edges\r\n2022-02-08 16:55:01,385 [MainThread ] [INFO ] done evaluate_non_rendered_values\r\n2022-02-08 16:55:01,386 [MainThread ] [DEBUG] created context for aws_batch_job_definition foobar\r\n2022-02-08 16:55:01,386 [MainThread ] [DEBUG] Created definitions context\r\n2022-02-08 16:55:01,386 [MainThread ] [DEBUG] Scanning file: /job.tf\r\n2022-02-08 16:55:01,386 [MainThread ] [DEBUG] Running check: Batch job does not define a privileged container on file /job.tf\r\n2022-02-08 16:55:01,386 [MainThread ] [ERROR] Failed to run check: Batch job does not define a privileged container for configuration: {'container_properties': ['file(\"path.module/job_definition.json\")'], 'name': ['foobar'], 'type': ['container']} at file: /job.tf\r\nTraceback (most recent call last):\r\n File \"/home/foobar/.local/bin/checkov\", line 9, in <module>\r\n sys.exit(run())\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/main.py\", line 223, in run\r\n scan_reports = runner_registry.run(root_folder=root_folder, external_checks_dir=external_checks_dir,\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/common/runners/runner_registry.py\", line 59, in run\r\n reports = [self.runners[0].run(root_folder, external_checks_dir=external_checks_dir, files=files,\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/runner.py\", line 119, in run\r\n self.check_tf_definition(report, root_folder, runner_filter, collect_skip_comments)\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/runner.py\", line 212, in check_tf_definition\r\n self.run_all_blocks(definition, self.context, full_file_path, root_folder, report,\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/runner.py\", line 222, in run_all_blocks\r\n self.run_block(definition[block_type], definitions_context,\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/runner.py\", line 294, in run_block\r\n results = registry.scan(scanned_file, entity, skipped_checks, runner_filter)\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/common/checks/base_check_registry.py\", line 121, in scan\r\n result = self.run_check(check, entity_configuration, entity_name, entity_type, scanned_file, skip_info)\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/common/checks/base_check_registry.py\", line 135, in run_check\r\n result = check.run(\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/common/checks/base_check.py\", line 81, in run\r\n raise e\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/common/checks/base_check.py\", line 68, 
in run\r\n check_result[\"result\"] = self.scan_entity_conf(entity_configuration, entity_type)\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/checks/resource/base_resource_check.py\", line 54, in scan_entity_conf\r\n return self.scan_resource_conf(conf)\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py\", line 17, in scan_resource_conf\r\n container = json.loads(conf.get(\"container_properties\")[0])\r\n File \"/usr/lib64/python3.10/json/__init__.py\", line 346, in loads\r\n return _default_decoder.decode(s)\r\n File \"/usr/lib64/python3.10/json/decoder.py\", line 337, in decode\r\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\r\n File \"/usr/lib64/python3.10/json/decoder.py\", line 355, in raw_decode\r\n raise JSONDecodeError(\"Expecting value\", s, err.value) from None\r\njson.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)\r\n```\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Fedora 35\r\n - Checkov Version: 2.0.809\r\n\r\n**Additional context**\r\nThis issue appears from https://github.com/bridgecrewio/checkov/pull/2356\n", "before_files": [{"content": "import json\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass BatchJobIsNotPrivileged(BaseResourceCheck):\n def __init__(self):\n name = \"Batch job does not define a privileged container\"\n id = \"CKV_AWS_210\"\n supported_resources = ['aws_batch_job_definition']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if conf.get(\"container_properties\"):\n if type(conf.get(\"container_properties\")[0]) is str:\n container = json.loads(conf.get(\"container_properties\")[0])\n else:\n container = conf.get(\"container_properties\")[0]\n if container.get(\"privileged\"):\n return CheckResult.FAILED\n return CheckResult.PASSED\n return CheckResult.UNKNOWN\n\n\ncheck = BatchJobIsNotPrivileged()\n", "path": "checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py"}]} | 2,946 | 190 |
gh_patches_debug_33454 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1690 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
button to add group lists from group page
**Is your feature request related to a problem? Please describe.**
At the moment if you want to add a group-curated list, you have to do it from the Lists page, rather than the home page of the Group. This makes sense if you know how the backend functionality works but is not very intuitive.
**Describe the solution you'd like**
Add a `Create list` button (like the one in the lists view) to the Group page.
**Describe alternatives you've considered**
Leaving the current, obscure, functionality.
**Additional context**
I intended to add this once the initial group functionality was completed, but I forgot about it.
This Issue is to remind me to put in a PR to add this.
</issue>
<code>
[start of bookwyrm/views/group.py]
1 """group views"""
2 from django.apps import apps
3 from django.contrib.auth.decorators import login_required
4 from django.db import IntegrityError
5 from django.core.paginator import Paginator
6 from django.http import HttpResponseBadRequest
7 from django.shortcuts import get_object_or_404, redirect
8 from django.template.response import TemplateResponse
9 from django.utils.decorators import method_decorator
10 from django.views import View
11 from django.views.decorators.http import require_POST
12 from django.contrib.postgres.search import TrigramSimilarity
13 from django.db.models.functions import Greatest
14
15 from bookwyrm import forms, models
16 from bookwyrm.suggested_users import suggested_users
17 from .helpers import get_user_from_username
18
19 # pylint: disable=no-self-use
20 class Group(View):
21 """group page"""
22
23 def get(self, request, group_id):
24 """display a group"""
25
26 group = get_object_or_404(models.Group, id=group_id)
27 group.raise_visible_to_user(request.user)
28 lists = (
29 models.List.privacy_filter(request.user)
30 .filter(group=group)
31 .order_by("-updated_date")
32 )
33
34 data = {
35 "group": group,
36 "lists": lists,
37 "group_form": forms.GroupForm(instance=group),
38 "path": "/group",
39 }
40 return TemplateResponse(request, "groups/group.html", data)
41
42 @method_decorator(login_required, name="dispatch")
43 def post(self, request, group_id):
44 """edit a group"""
45 user_group = get_object_or_404(models.Group, id=group_id)
46 form = forms.GroupForm(request.POST, instance=user_group)
47 if not form.is_valid():
48 return redirect("group", user_group.id)
49 user_group = form.save()
50
51 # let the other members know something about the group changed
52 memberships = models.GroupMember.objects.filter(group=user_group)
53 model = apps.get_model("bookwyrm.Notification", require_ready=True)
54 for field in form.changed_data:
55 notification_type = (
56 "GROUP_PRIVACY"
57 if field == "privacy"
58 else "GROUP_NAME"
59 if field == "name"
60 else "GROUP_DESCRIPTION"
61 if field == "description"
62 else None
63 )
64 if notification_type:
65 for membership in memberships:
66 member = membership.user
67 if member != request.user:
68 model.objects.create(
69 user=member,
70 related_user=request.user,
71 related_group=user_group,
72 notification_type=notification_type,
73 )
74
75 return redirect("group", user_group.id)
76
77
78 @method_decorator(login_required, name="dispatch")
79 class UserGroups(View):
80 """a user's groups page"""
81
82 def get(self, request, username):
83 """display a group"""
84 user = get_user_from_username(request.user, username)
85 groups = (
86 models.Group.privacy_filter(request.user)
87 .filter(memberships__user=user)
88 .order_by("-updated_date")
89 )
90 paginated = Paginator(groups, 12)
91
92 data = {
93 "groups": paginated.get_page(request.GET.get("page")),
94 "is_self": request.user.id == user.id,
95 "user": user,
96 "group_form": forms.GroupForm(),
97 "path": user.local_path + "/group",
98 }
99 return TemplateResponse(request, "user/groups.html", data)
100
101 @method_decorator(login_required, name="dispatch")
102 # pylint: disable=unused-argument
103 def post(self, request, username):
104 """create a user group"""
105 form = forms.GroupForm(request.POST)
106 if not form.is_valid():
107 return redirect(request.user.local_path + "/groups")
108 group = form.save()
109 # add the creator as a group member
110 models.GroupMember.objects.create(group=group, user=request.user)
111 return redirect("group", group.id)
112
113
114 @method_decorator(login_required, name="dispatch")
115 class FindUsers(View):
116 """find friends to add to your group"""
117
118 # this is mostly borrowed from the Get Started friend finder
119
120 def get(self, request, group_id):
121 """basic profile info"""
122 user_query = request.GET.get("user_query")
123 group = get_object_or_404(models.Group, id=group_id)
124
125 if not group:
126 return HttpResponseBadRequest()
127
128 if not group.user == request.user:
129 return HttpResponseBadRequest()
130
131 user_results = (
132 models.User.viewer_aware_objects(request.user)
133 .exclude(
134 memberships__in=group.memberships.all()
135 ) # don't suggest users who are already members
136 .annotate(
137 similarity=Greatest(
138 TrigramSimilarity("username", user_query),
139 TrigramSimilarity("localname", user_query),
140 )
141 )
142 .filter(similarity__gt=0.5, local=True)
143 .order_by("-similarity")[:5]
144 )
145 data = {"no_results": not user_results}
146
147 if user_results.count() < 5:
148 user_results = list(user_results) + suggested_users.get_suggestions(
149 request.user, local=True
150 )
151
152 data = {
153 "suggested_users": user_results,
154 "group": group,
155 "group_form": forms.GroupForm(instance=group),
156 "user_query": user_query,
157 "requestor_is_manager": request.user == group.user,
158 }
159 return TemplateResponse(request, "groups/find_users.html", data)
160
161
162 @require_POST
163 @login_required
164 def delete_group(request, group_id):
165 """delete a group"""
166 group = get_object_or_404(models.Group, id=group_id)
167
168 # only the owner can delete a group
169 group.raise_not_deletable(request.user)
170
171 # deal with any group lists
172 models.List.objects.filter(group=group).update(curation="closed", group=None)
173
174 group.delete()
175 return redirect(request.user.local_path + "/groups")
176
177
178 @require_POST
179 @login_required
180 def invite_member(request):
181 """invite a member to the group"""
182 group = get_object_or_404(models.Group, id=request.POST.get("group"))
183 user = get_user_from_username(request.user, request.POST["user"])
184
185 if not group.user == request.user:
186 return HttpResponseBadRequest()
187
188 try:
189 models.GroupMemberInvitation.objects.create(user=user, group=group)
190 except IntegrityError:
191 pass
192
193 return redirect(user.local_path)
194
195
196 @require_POST
197 @login_required
198 def remove_member(request):
199 """remove a member from the group"""
200 group = get_object_or_404(models.Group, id=request.POST.get("group"))
201 user = get_user_from_username(request.user, request.POST["user"])
202
203 # you can't be removed from your own group
204 if user == group.user:
205 return HttpResponseBadRequest()
206
207 is_member = models.GroupMember.objects.filter(group=group, user=user).exists()
208 is_invited = models.GroupMemberInvitation.objects.filter(
209 group=group, user=user
210 ).exists()
211
212 if is_invited:
213 try:
214 invitation = models.GroupMemberInvitation.objects.get(
215 user=user, group=group
216 )
217
218 invitation.reject()
219
220 except IntegrityError:
221 pass
222
223 if is_member:
224 try:
225 models.List.remove_from_group(group.user, user)
226 models.GroupMember.remove(group.user, user)
227 except IntegrityError:
228 pass
229
230 memberships = models.GroupMember.objects.filter(group=group)
231 model = apps.get_model("bookwyrm.Notification", require_ready=True)
232 notification_type = "LEAVE" if user == request.user else "REMOVE"
233 # let the other members know about it
234 for membership in memberships:
235 member = membership.user
236 if member != request.user:
237 model.objects.create(
238 user=member,
239 related_user=user,
240 related_group=group,
241 notification_type=notification_type,
242 )
243
244 # let the user (now ex-member) know as well, if they were removed
245 if notification_type == "REMOVE":
246 model.objects.create(
247 user=user,
248 related_group=group,
249 notification_type=notification_type,
250 )
251
252 return redirect(group.local_path)
253
254
255 @require_POST
256 @login_required
257 def accept_membership(request):
258 """accept an invitation to join a group"""
259 group = get_object_or_404(models.Group, id=request.POST.get("group"))
260 invite = get_object_or_404(
261 models.GroupMemberInvitation, group=group, user=request.user
262 )
263
264 try:
265 invite.accept()
266 except IntegrityError:
267 pass
268
269 return redirect(group.local_path)
270
271
272 @require_POST
273 @login_required
274 def reject_membership(request):
275 """reject an invitation to join a group"""
276 group = get_object_or_404(models.Group, id=request.POST.get("group"))
277 invite = get_object_or_404(
278 models.GroupMemberInvitation, group=group, user=request.user
279 )
280
281 invite.reject()
282 return redirect(request.user.local_path)
283
[end of bookwyrm/views/group.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/views/group.py b/bookwyrm/views/group.py
--- a/bookwyrm/views/group.py
+++ b/bookwyrm/views/group.py
@@ -34,7 +34,8 @@
data = {
"group": group,
"lists": lists,
- "group_form": forms.GroupForm(instance=group),
+ "group_form": forms.GroupForm(instance=group, auto_id="group_form_id_%s"),
+ "list_form": forms.ListForm(),
"path": "/group",
}
return TemplateResponse(request, "groups/group.html", data)
@@ -121,6 +122,11 @@
"""basic profile info"""
user_query = request.GET.get("user_query")
group = get_object_or_404(models.Group, id=group_id)
+ lists = (
+ models.List.privacy_filter(request.user)
+ .filter(group=group)
+ .order_by("-updated_date")
+ )
if not group:
return HttpResponseBadRequest()
@@ -142,7 +148,7 @@
.filter(similarity__gt=0.5, local=True)
.order_by("-similarity")[:5]
)
- data = {"no_results": not user_results}
+ no_results = not user_results
if user_results.count() < 5:
user_results = list(user_results) + suggested_users.get_suggestions(
@@ -151,8 +157,11 @@
data = {
"suggested_users": user_results,
+ "no_results": no_results,
"group": group,
- "group_form": forms.GroupForm(instance=group),
+ "lists": lists,
+ "group_form": forms.GroupForm(instance=group, auto_id="group_form_id_%s"),
+ "list_form": forms.ListForm(),
"user_query": user_query,
"requestor_is_manager": request.user == group.user,
}
| {"golden_diff": "diff --git a/bookwyrm/views/group.py b/bookwyrm/views/group.py\n--- a/bookwyrm/views/group.py\n+++ b/bookwyrm/views/group.py\n@@ -34,7 +34,8 @@\n data = {\n \"group\": group,\n \"lists\": lists,\n- \"group_form\": forms.GroupForm(instance=group),\n+ \"group_form\": forms.GroupForm(instance=group, auto_id=\"group_form_id_%s\"),\n+ \"list_form\": forms.ListForm(),\n \"path\": \"/group\",\n }\n return TemplateResponse(request, \"groups/group.html\", data)\n@@ -121,6 +122,11 @@\n \"\"\"basic profile info\"\"\"\n user_query = request.GET.get(\"user_query\")\n group = get_object_or_404(models.Group, id=group_id)\n+ lists = (\n+ models.List.privacy_filter(request.user)\n+ .filter(group=group)\n+ .order_by(\"-updated_date\")\n+ )\n \n if not group:\n return HttpResponseBadRequest()\n@@ -142,7 +148,7 @@\n .filter(similarity__gt=0.5, local=True)\n .order_by(\"-similarity\")[:5]\n )\n- data = {\"no_results\": not user_results}\n+ no_results = not user_results\n \n if user_results.count() < 5:\n user_results = list(user_results) + suggested_users.get_suggestions(\n@@ -151,8 +157,11 @@\n \n data = {\n \"suggested_users\": user_results,\n+ \"no_results\": no_results,\n \"group\": group,\n- \"group_form\": forms.GroupForm(instance=group),\n+ \"lists\": lists,\n+ \"group_form\": forms.GroupForm(instance=group, auto_id=\"group_form_id_%s\"),\n+ \"list_form\": forms.ListForm(),\n \"user_query\": user_query,\n \"requestor_is_manager\": request.user == group.user,\n }\n", "issue": "button to add group lists from group page\n**Is your feature request related to a problem? Please describe.**\r\nAt the moment if you want to add a group-curated list, you have to do it from the Lists page, rather than the home page of the Group. This makes sense if you know how the backend functionality works but is not very intuitive.\r\n\r\n**Describe the solution you'd like**\r\n Add a `Create list` button (like the one in the lists view) to the Group page.\r\n\r\n**Describe alternatives you've considered**\r\nLeaving the current, obscure, functionality.\r\n\r\n**Additional context**\r\nI intended to add this once the initial group functionality was completed, but I forgot about it.\r\nThis Issue is to remind me to put in a PR to add this.\r\n\n", "before_files": [{"content": "\"\"\"group views\"\"\"\nfrom django.apps import apps\nfrom django.contrib.auth.decorators import login_required\nfrom django.db import IntegrityError\nfrom django.core.paginator import Paginator\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.decorators.http import require_POST\nfrom django.contrib.postgres.search import TrigramSimilarity\nfrom django.db.models.functions import Greatest\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.suggested_users import suggested_users\nfrom .helpers import get_user_from_username\n\n# pylint: disable=no-self-use\nclass Group(View):\n \"\"\"group page\"\"\"\n\n def get(self, request, group_id):\n \"\"\"display a group\"\"\"\n\n group = get_object_or_404(models.Group, id=group_id)\n group.raise_visible_to_user(request.user)\n lists = (\n models.List.privacy_filter(request.user)\n .filter(group=group)\n .order_by(\"-updated_date\")\n )\n\n data = {\n \"group\": group,\n \"lists\": lists,\n \"group_form\": forms.GroupForm(instance=group),\n \"path\": \"/group\",\n }\n return 
TemplateResponse(request, \"groups/group.html\", data)\n\n @method_decorator(login_required, name=\"dispatch\")\n def post(self, request, group_id):\n \"\"\"edit a group\"\"\"\n user_group = get_object_or_404(models.Group, id=group_id)\n form = forms.GroupForm(request.POST, instance=user_group)\n if not form.is_valid():\n return redirect(\"group\", user_group.id)\n user_group = form.save()\n\n # let the other members know something about the group changed\n memberships = models.GroupMember.objects.filter(group=user_group)\n model = apps.get_model(\"bookwyrm.Notification\", require_ready=True)\n for field in form.changed_data:\n notification_type = (\n \"GROUP_PRIVACY\"\n if field == \"privacy\"\n else \"GROUP_NAME\"\n if field == \"name\"\n else \"GROUP_DESCRIPTION\"\n if field == \"description\"\n else None\n )\n if notification_type:\n for membership in memberships:\n member = membership.user\n if member != request.user:\n model.objects.create(\n user=member,\n related_user=request.user,\n related_group=user_group,\n notification_type=notification_type,\n )\n\n return redirect(\"group\", user_group.id)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass UserGroups(View):\n \"\"\"a user's groups page\"\"\"\n\n def get(self, request, username):\n \"\"\"display a group\"\"\"\n user = get_user_from_username(request.user, username)\n groups = (\n models.Group.privacy_filter(request.user)\n .filter(memberships__user=user)\n .order_by(\"-updated_date\")\n )\n paginated = Paginator(groups, 12)\n\n data = {\n \"groups\": paginated.get_page(request.GET.get(\"page\")),\n \"is_self\": request.user.id == user.id,\n \"user\": user,\n \"group_form\": forms.GroupForm(),\n \"path\": user.local_path + \"/group\",\n }\n return TemplateResponse(request, \"user/groups.html\", data)\n\n @method_decorator(login_required, name=\"dispatch\")\n # pylint: disable=unused-argument\n def post(self, request, username):\n \"\"\"create a user group\"\"\"\n form = forms.GroupForm(request.POST)\n if not form.is_valid():\n return redirect(request.user.local_path + \"/groups\")\n group = form.save()\n # add the creator as a group member\n models.GroupMember.objects.create(group=group, user=request.user)\n return redirect(\"group\", group.id)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass FindUsers(View):\n \"\"\"find friends to add to your group\"\"\"\n\n # this is mostly borrowed from the Get Started friend finder\n\n def get(self, request, group_id):\n \"\"\"basic profile info\"\"\"\n user_query = request.GET.get(\"user_query\")\n group = get_object_or_404(models.Group, id=group_id)\n\n if not group:\n return HttpResponseBadRequest()\n\n if not group.user == request.user:\n return HttpResponseBadRequest()\n\n user_results = (\n models.User.viewer_aware_objects(request.user)\n .exclude(\n memberships__in=group.memberships.all()\n ) # don't suggest users who are already members\n .annotate(\n similarity=Greatest(\n TrigramSimilarity(\"username\", user_query),\n TrigramSimilarity(\"localname\", user_query),\n )\n )\n .filter(similarity__gt=0.5, local=True)\n .order_by(\"-similarity\")[:5]\n )\n data = {\"no_results\": not user_results}\n\n if user_results.count() < 5:\n user_results = list(user_results) + suggested_users.get_suggestions(\n request.user, local=True\n )\n\n data = {\n \"suggested_users\": user_results,\n \"group\": group,\n \"group_form\": forms.GroupForm(instance=group),\n \"user_query\": user_query,\n \"requestor_is_manager\": request.user == group.user,\n }\n return 
TemplateResponse(request, \"groups/find_users.html\", data)\n\n\n@require_POST\n@login_required\ndef delete_group(request, group_id):\n \"\"\"delete a group\"\"\"\n group = get_object_or_404(models.Group, id=group_id)\n\n # only the owner can delete a group\n group.raise_not_deletable(request.user)\n\n # deal with any group lists\n models.List.objects.filter(group=group).update(curation=\"closed\", group=None)\n\n group.delete()\n return redirect(request.user.local_path + \"/groups\")\n\n\n@require_POST\n@login_required\ndef invite_member(request):\n \"\"\"invite a member to the group\"\"\"\n group = get_object_or_404(models.Group, id=request.POST.get(\"group\"))\n user = get_user_from_username(request.user, request.POST[\"user\"])\n\n if not group.user == request.user:\n return HttpResponseBadRequest()\n\n try:\n models.GroupMemberInvitation.objects.create(user=user, group=group)\n except IntegrityError:\n pass\n\n return redirect(user.local_path)\n\n\n@require_POST\n@login_required\ndef remove_member(request):\n \"\"\"remove a member from the group\"\"\"\n group = get_object_or_404(models.Group, id=request.POST.get(\"group\"))\n user = get_user_from_username(request.user, request.POST[\"user\"])\n\n # you can't be removed from your own group\n if user == group.user:\n return HttpResponseBadRequest()\n\n is_member = models.GroupMember.objects.filter(group=group, user=user).exists()\n is_invited = models.GroupMemberInvitation.objects.filter(\n group=group, user=user\n ).exists()\n\n if is_invited:\n try:\n invitation = models.GroupMemberInvitation.objects.get(\n user=user, group=group\n )\n\n invitation.reject()\n\n except IntegrityError:\n pass\n\n if is_member:\n try:\n models.List.remove_from_group(group.user, user)\n models.GroupMember.remove(group.user, user)\n except IntegrityError:\n pass\n\n memberships = models.GroupMember.objects.filter(group=group)\n model = apps.get_model(\"bookwyrm.Notification\", require_ready=True)\n notification_type = \"LEAVE\" if user == request.user else \"REMOVE\"\n # let the other members know about it\n for membership in memberships:\n member = membership.user\n if member != request.user:\n model.objects.create(\n user=member,\n related_user=user,\n related_group=group,\n notification_type=notification_type,\n )\n\n # let the user (now ex-member) know as well, if they were removed\n if notification_type == \"REMOVE\":\n model.objects.create(\n user=user,\n related_group=group,\n notification_type=notification_type,\n )\n\n return redirect(group.local_path)\n\n\n@require_POST\n@login_required\ndef accept_membership(request):\n \"\"\"accept an invitation to join a group\"\"\"\n group = get_object_or_404(models.Group, id=request.POST.get(\"group\"))\n invite = get_object_or_404(\n models.GroupMemberInvitation, group=group, user=request.user\n )\n\n try:\n invite.accept()\n except IntegrityError:\n pass\n\n return redirect(group.local_path)\n\n\n@require_POST\n@login_required\ndef reject_membership(request):\n \"\"\"reject an invitation to join a group\"\"\"\n group = get_object_or_404(models.Group, id=request.POST.get(\"group\"))\n invite = get_object_or_404(\n models.GroupMemberInvitation, group=group, user=request.user\n )\n\n invite.reject()\n return redirect(request.user.local_path)\n", "path": "bookwyrm/views/group.py"}]} | 3,348 | 434 |
gh_patches_debug_14220 | rasdani/github-patches | git_diff | fossasia__open-event-server-5229 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow bank, cheque and onsite for payment_mode in orders schema
**Describe the bug**
Currently setting bank, cheque and onsite for payment_mode returns 422 error.
**Expected behavior**
Payment Mode should allow bank, cheque and onsite as options.
</issue>
<code>
[start of app/api/schema/orders.py]
1 from flask import request
2 from marshmallow import post_dump, validates_schema, validate
3 from marshmallow_jsonapi import fields
4 from marshmallow_jsonapi.flask import Relationship
5
6 from app import db
7 from app.api.helpers.utilities import dasherize
8 from app.api.schema.base import SoftDeletionSchema
9 from utils.common import use_defaults
10
11
12 class OnSiteTicketSchema(SoftDeletionSchema):
13 class Meta:
14 type_ = 'on-site-ticket'
15 inflect = dasherize
16
17 id = fields.Str(load_only=True, required=True)
18 quantity = fields.Str(load_only=True, required=True)
19
20
21 @use_defaults()
22 class OrderSchema(SoftDeletionSchema):
23 class Meta:
24 type_ = 'order'
25 self_view = 'v1.order_detail'
26 self_view_kwargs = {'order_identifier': '<identifier>'}
27 inflect = dasherize
28
29 @post_dump
30 def generate_payment_url(self, data):
31 """
32 generate payment url for an order
33 :param data:
34 :return:
35 """
36 if 'POST' in request.method or ('GET' in request.method and 'regenerate' in request.args) and 'completed' != \
37 data["status"]:
38 if data['payment_mode'] == 'stripe':
39 data['payment_url'] = 'stripe://payment'
40 return data
41
42 @validates_schema
43 def initial_values(self, data):
44 if data.get('payment_mode') is None and 'POST' in request.method:
45 data['payment_mode'] = 'free'
46 return data
47
48 id = fields.Str(dump_only=True)
49 identifier = fields.Str(dump_only=True)
50 amount = fields.Float(validate=lambda n: n > 0, allow_none=True)
51 address = fields.Str(allow_none=True)
52 city = fields.Str(allow_none=True)
53 state = fields.Str(db.String, allow_none=True)
54 country = fields.Str(allow_none=True)
55 zipcode = fields.Str(allow_none=True)
56 completed_at = fields.DateTime(dump_only=True)
57 created_at = fields.DateTime(dump_only=True)
58 transaction_id = fields.Str(dump_only=True)
59 payment_mode = fields.Str(default="free",
60 validate=validate.OneOf(choices=["free", "stripe", "paypal"]), allow_none=True)
61 paid_via = fields.Str(dump_only=True)
62 brand = fields.Str(dump_only=True)
63 exp_month = fields.Str(dump_only=True)
64 exp_year = fields.Str(dump_only=True)
65 last4 = fields.Str(dump_only=True)
66 status = fields.Str(validate=validate.OneOf(choices=["pending", "cancelled", "completed", "placed", "expired"]))
67 discount_code_id = fields.Str(allow_none=True)
68 payment_url = fields.Str(dump_only=True)
69 cancel_note = fields.Str(allow_none=True)
70 order_notes = fields.Str(allow_none=True)
71 tickets_pdf_url = fields.Url(dump_only=True)
72
73 # only used in the case of an on site attendee.
74 on_site_tickets = fields.List(cls_or_instance=fields.Nested(OnSiteTicketSchema), load_only=True, allow_none=True)
75
76 attendees = Relationship(attribute='ticket_holders',
77 self_view='v1.order_attendee',
78 self_view_kwargs={'order_identifier': '<identifier>'},
79 related_view='v1.attendee_list',
80 related_view_kwargs={'order_identifier': '<identifier>'},
81 schema='AttendeeSchemaPublic',
82 many=True,
83 type_='attendee')
84
85 tickets = Relationship(attribute='tickets',
86 self_view='v1.order_ticket',
87 self_view_kwargs={'order_identifier': '<identifier>'},
88 related_view='v1.ticket_list',
89 related_view_kwargs={'order_identifier': '<identifier>'},
90 schema='TicketSchemaPublic',
91 many=True,
92 type_="ticket")
93
94 user = Relationship(attribute='user',
95 self_view='v1.order_user',
96 self_view_kwargs={'order_identifier': '<identifier>'},
97 related_view='v1.user_detail',
98 related_view_kwargs={'id': '<user_id>'},
99 schema='UserSchemaPublic',
100 type_="user")
101
102 event = Relationship(attribute='event',
103 self_view='v1.order_event',
104 self_view_kwargs={'order_identifier': '<identifier>'},
105 related_view='v1.event_detail',
106 related_view_kwargs={'id': '<event_id>'},
107 schema='EventSchemaPublic',
108 type_="event")
109
110 marketer = Relationship(attribute='marketer',
111 self_view='v1.order_marketer',
112 self_view_kwargs={'order_identifier': '<identifier>'},
113 related_view='v1.user_detail',
114 related_view_kwargs={'id': '<marketer_id>'},
115 schema='UserSchemaPublic',
116 type_="user")
117
118 discount_code = Relationship(attribute='discount_code',
119 self_view='v1.order_discount',
120 self_view_kwargs={'order_identifier': '<identifier>'},
121 related_view='v1.discount_code_detail',
122 related_view_kwargs={'id': '<discount_code_id>'},
123 schema='DiscountCodeSchemaPublic',
124 type_="discount-code")
125
[end of app/api/schema/orders.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/schema/orders.py b/app/api/schema/orders.py
--- a/app/api/schema/orders.py
+++ b/app/api/schema/orders.py
@@ -56,8 +56,10 @@
completed_at = fields.DateTime(dump_only=True)
created_at = fields.DateTime(dump_only=True)
transaction_id = fields.Str(dump_only=True)
- payment_mode = fields.Str(default="free",
- validate=validate.OneOf(choices=["free", "stripe", "paypal"]), allow_none=True)
+ payment_mode = fields.Str(
+ default="free",
+ validate=validate.OneOf(choices=["free", "stripe", "paypal", "bank", "cheque", "onsite"]),
+ allow_none=True)
paid_via = fields.Str(dump_only=True)
brand = fields.Str(dump_only=True)
exp_month = fields.Str(dump_only=True)
| {"golden_diff": "diff --git a/app/api/schema/orders.py b/app/api/schema/orders.py\n--- a/app/api/schema/orders.py\n+++ b/app/api/schema/orders.py\n@@ -56,8 +56,10 @@\n completed_at = fields.DateTime(dump_only=True)\n created_at = fields.DateTime(dump_only=True)\n transaction_id = fields.Str(dump_only=True)\n- payment_mode = fields.Str(default=\"free\",\n- validate=validate.OneOf(choices=[\"free\", \"stripe\", \"paypal\"]), allow_none=True)\n+ payment_mode = fields.Str(\n+ default=\"free\",\n+ validate=validate.OneOf(choices=[\"free\", \"stripe\", \"paypal\", \"bank\", \"cheque\", \"onsite\"]),\n+ allow_none=True)\n paid_via = fields.Str(dump_only=True)\n brand = fields.Str(dump_only=True)\n exp_month = fields.Str(dump_only=True)\n", "issue": "Allow bank, cheque and onsite for payment_mode in orders schema\n**Describe the bug**\r\nCurrently setting bank, cheque and onsite for payment_mode returns 422 error.\r\n\r\n**Expected behavior**\r\nPayment Mode should allow bank, cheque and onsite as options.\n", "before_files": [{"content": "from flask import request\nfrom marshmallow import post_dump, validates_schema, validate\nfrom marshmallow_jsonapi import fields\nfrom marshmallow_jsonapi.flask import Relationship\n\nfrom app import db\nfrom app.api.helpers.utilities import dasherize\nfrom app.api.schema.base import SoftDeletionSchema\nfrom utils.common import use_defaults\n\n\nclass OnSiteTicketSchema(SoftDeletionSchema):\n class Meta:\n type_ = 'on-site-ticket'\n inflect = dasherize\n\n id = fields.Str(load_only=True, required=True)\n quantity = fields.Str(load_only=True, required=True)\n\n\n@use_defaults()\nclass OrderSchema(SoftDeletionSchema):\n class Meta:\n type_ = 'order'\n self_view = 'v1.order_detail'\n self_view_kwargs = {'order_identifier': '<identifier>'}\n inflect = dasherize\n\n @post_dump\n def generate_payment_url(self, data):\n \"\"\"\n generate payment url for an order\n :param data:\n :return:\n \"\"\"\n if 'POST' in request.method or ('GET' in request.method and 'regenerate' in request.args) and 'completed' != \\\n data[\"status\"]:\n if data['payment_mode'] == 'stripe':\n data['payment_url'] = 'stripe://payment'\n return data\n\n @validates_schema\n def initial_values(self, data):\n if data.get('payment_mode') is None and 'POST' in request.method:\n data['payment_mode'] = 'free'\n return data\n\n id = fields.Str(dump_only=True)\n identifier = fields.Str(dump_only=True)\n amount = fields.Float(validate=lambda n: n > 0, allow_none=True)\n address = fields.Str(allow_none=True)\n city = fields.Str(allow_none=True)\n state = fields.Str(db.String, allow_none=True)\n country = fields.Str(allow_none=True)\n zipcode = fields.Str(allow_none=True)\n completed_at = fields.DateTime(dump_only=True)\n created_at = fields.DateTime(dump_only=True)\n transaction_id = fields.Str(dump_only=True)\n payment_mode = fields.Str(default=\"free\",\n validate=validate.OneOf(choices=[\"free\", \"stripe\", \"paypal\"]), allow_none=True)\n paid_via = fields.Str(dump_only=True)\n brand = fields.Str(dump_only=True)\n exp_month = fields.Str(dump_only=True)\n exp_year = fields.Str(dump_only=True)\n last4 = fields.Str(dump_only=True)\n status = fields.Str(validate=validate.OneOf(choices=[\"pending\", \"cancelled\", \"completed\", \"placed\", \"expired\"]))\n discount_code_id = fields.Str(allow_none=True)\n payment_url = fields.Str(dump_only=True)\n cancel_note = fields.Str(allow_none=True)\n order_notes = fields.Str(allow_none=True)\n tickets_pdf_url = fields.Url(dump_only=True)\n\n # only used in the case 
of an on site attendee.\n on_site_tickets = fields.List(cls_or_instance=fields.Nested(OnSiteTicketSchema), load_only=True, allow_none=True)\n\n attendees = Relationship(attribute='ticket_holders',\n self_view='v1.order_attendee',\n self_view_kwargs={'order_identifier': '<identifier>'},\n related_view='v1.attendee_list',\n related_view_kwargs={'order_identifier': '<identifier>'},\n schema='AttendeeSchemaPublic',\n many=True,\n type_='attendee')\n\n tickets = Relationship(attribute='tickets',\n self_view='v1.order_ticket',\n self_view_kwargs={'order_identifier': '<identifier>'},\n related_view='v1.ticket_list',\n related_view_kwargs={'order_identifier': '<identifier>'},\n schema='TicketSchemaPublic',\n many=True,\n type_=\"ticket\")\n\n user = Relationship(attribute='user',\n self_view='v1.order_user',\n self_view_kwargs={'order_identifier': '<identifier>'},\n related_view='v1.user_detail',\n related_view_kwargs={'id': '<user_id>'},\n schema='UserSchemaPublic',\n type_=\"user\")\n\n event = Relationship(attribute='event',\n self_view='v1.order_event',\n self_view_kwargs={'order_identifier': '<identifier>'},\n related_view='v1.event_detail',\n related_view_kwargs={'id': '<event_id>'},\n schema='EventSchemaPublic',\n type_=\"event\")\n\n marketer = Relationship(attribute='marketer',\n self_view='v1.order_marketer',\n self_view_kwargs={'order_identifier': '<identifier>'},\n related_view='v1.user_detail',\n related_view_kwargs={'id': '<marketer_id>'},\n schema='UserSchemaPublic',\n type_=\"user\")\n\n discount_code = Relationship(attribute='discount_code',\n self_view='v1.order_discount',\n self_view_kwargs={'order_identifier': '<identifier>'},\n related_view='v1.discount_code_detail',\n related_view_kwargs={'id': '<discount_code_id>'},\n schema='DiscountCodeSchemaPublic',\n type_=\"discount-code\")\n", "path": "app/api/schema/orders.py"}]} | 1,924 | 190 |
gh_patches_debug_39642 | rasdani/github-patches | git_diff | gammapy__gammapy-3504 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LightCurveEstimator fails if no dataset contribute in a time bin
**Gammapy version**
dev
**Bug description**
If you run the `LightCurveEstimator` and no datasets are found to contribute in a time interval, it will fail because the `FluxPoints` produced has no `npred` quantity attached.
This is because the `ParameterEstimator.estimate_ts` only returns `ts` as `nan` before estimating the `npred`.
https://github.com/gammapy/gammapy/blob/d1ede2663429253b72011b9eda256ed2b48f8e9c/gammapy/estimators/parameter.py#L110
**Expected behavior**
The estimator should run and return `nan` for `npred` as well.
**To Reproduce**
If you want to test, use the light curve validation script and change the energy range to e.g. 0.5-0.7 TeV in line https://github.com/gammapy/gammapy-benchmarks/blob/8d02a809583a8ddf3d222c5b7db2b542c7f2addf/validation/lightcurve/make.py#L108
it will yield:
```
Traceback (most recent call last):
File "make.py", line 278, in <module>
cli()
File "/Users/terrier/Code/anaconda3/envs/gammapy-dev/lib/python3.7/site-packages/click/core.py", line 1137, in __call__
return self.main(*args, **kwargs)
File "/Users/terrier/Code/anaconda3/envs/gammapy-dev/lib/python3.7/site-packages/click/core.py", line 1062, in main
rv = self.invoke(ctx)
File "/Users/terrier/Code/anaconda3/envs/gammapy-dev/lib/python3.7/site-packages/click/core.py", line 1668, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/terrier/Code/anaconda3/envs/gammapy-dev/lib/python3.7/site-packages/click/core.py", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/terrier/Code/anaconda3/envs/gammapy-dev/lib/python3.7/site-packages/click/core.py", line 763, in invoke
return __callback(*args, **kwargs)
File "make.py", line 69, in run_analyses
perform_analysis(analysis_type, short_observations, target_position, time_intervals)
File "make.py", line 114, in perform_analysis
lc = lc_maker.run(datasets)
File "/Users/terrier/Code/gammapy-dev/gammapy/gammapy/estimators/lightcurve.py", line 126, in run
fp._data[name], dataset_names=datasets.names
File "/Users/terrier/Code/gammapy-dev/gammapy/gammapy/maps/maps.py", line 46, in __getitem__
return self._data[key]
KeyError: 'npred'
```
**Other information**
Any other information you think will be useful for us to fix the issue can go here.
</issue>
<code>
[start of gammapy/estimators/parameter.py]
1 # Licensed under a 3-clause BSD style license - see LICENSE.rst
2 import logging
3 import numpy as np
4 from gammapy.datasets import Datasets
5 from gammapy.modeling import Fit
6 from .core import Estimator
7
8 log = logging.getLogger(__name__)
9
10
11 class ParameterEstimator(Estimator):
12 """Model parameter estimator.
13
14 Estimates a model parameter for a group of datasets. Compute best fit value,
15 symmetric and delta TS for a given null value. Additionally asymmetric errors
16 as well as parameter upper limit and fit statistic profile can be estimated.
17
18 Parameters
19 ----------
20 n_sigma : int
21 Sigma to use for asymmetric error computation. Default is 1.
22 n_sigma_ul : int
23 Sigma to use for upper limit computation. Default is 2.
24 null_value : float
25 Which null value to use for the parameter
26 selection_optional : list of str
27 Which additional quantities to estimate. Available options are:
28
29 * "all": all the optional steps are executed
30 * "errn-errp": estimate asymmetric errors on parameter best fit value.
31 * "ul": estimate upper limits.
32 * "scan": estimate fit statistic profiles.
33
34         Default is None so the optional steps are not executed.
35 fit : `Fit`
36 Fit instance specifying the backend and fit options.
37 reoptimize : bool
38 Re-optimize other free model parameters. Default is True.
39 """
40
41 tag = "ParameterEstimator"
42 _available_selection_optional = ["errn-errp", "ul", "scan"]
43
44 def __init__(
45 self,
46 n_sigma=1,
47 n_sigma_ul=2,
48 null_value=1e-150,
49 selection_optional=None,
50 fit=None,
51 reoptimize=True
52 ):
53 self.n_sigma = n_sigma
54 self.n_sigma_ul = n_sigma_ul
55 self.null_value = null_value
56 self.selection_optional = selection_optional
57
58 if fit is None:
59 fit = Fit()
60
61 self.fit = fit
62 self.reoptimize = reoptimize
63
64 def estimate_best_fit(self, datasets, parameter):
65         """Estimate parameter asymmetric errors
66
67 Parameters
68 ----------
69 datasets : `~gammapy.datasets.Datasets`
70 Datasets
71 parameter : `Parameter`
72 For which parameter to get the value
73
74 Returns
75 -------
76 result : dict
77 Dict with the various parameter estimation values.
78 """
79 value, total_stat, success, error = np.nan, 0, False, np.nan
80
81 if np.any(datasets.contributes_to_stat):
82 result = self.fit.run(datasets=datasets)
83 value, error = parameter.value, parameter.error
84 total_stat = result["optimize_result"].total_stat
85 success = result["optimize_result"].success
86
87 return {
88 f"{parameter.name}": value,
89 "stat": total_stat,
90 "success": success,
91 f"{parameter.name}_err": error * self.n_sigma,
92 }
93
94 def estimate_ts(self, datasets, parameter):
95 """Estimate parameter ts
96
97 Parameters
98 ----------
99 datasets : `~gammapy.datasets.Datasets`
100 Datasets
101 parameter : `Parameter`
102 For which parameter to get the value
103
104 Returns
105 -------
106 result : dict
107 Dict with the various parameter estimation values.
108 """
109 if not np.any(datasets.contributes_to_stat):
110 return {"ts": np.nan}
111
112 stat = datasets.stat_sum()
113 npred = self.estimate_npred(datasets=datasets)
114
115 with datasets.parameters.restore_status():
116 # compute ts value
117 parameter.value = self.null_value
118
119 if self.reoptimize:
120 parameter.frozen = True
121 _ = self.fit.optimize(datasets=datasets)
122
123 ts = datasets.stat_sum() - stat
124 npred_null = self.estimate_npred(datasets=datasets)
125
126 return {
127 "ts": ts,
128 "npred": npred["npred"],
129 "npred_null": npred_null["npred"]
130 }
131
132 def estimate_errn_errp(self, datasets, parameter):
133         """Estimate parameter asymmetric errors
134
135 Parameters
136 ----------
137 datasets : `~gammapy.datasets.Datasets`
138 Datasets
139 parameter : `Parameter`
140 For which parameter to get the value
141
142 Returns
143 -------
144 result : dict
145 Dict with the various parameter estimation values.
146 """
147 if not np.any(datasets.contributes_to_stat):
148 return {
149 f"{parameter.name}_errp": np.nan,
150 f"{parameter.name}_errn": np.nan,
151 }
152
153 self.fit.optimize(datasets=datasets)
154
155 res = self.fit.confidence(
156 datasets=datasets,
157 parameter=parameter,
158 sigma=self.n_sigma,
159 reoptimize=self.reoptimize
160 )
161
162 return {
163 f"{parameter.name}_errp": res["errp"],
164 f"{parameter.name}_errn": res["errn"],
165 }
166
167 def estimate_scan(self, datasets, parameter):
168 """Estimate parameter stat scan.
169
170 Parameters
171 ----------
172 datasets : `~gammapy.datasets.Datasets`
173 The datasets used to estimate the model parameter
174 parameter : `Parameter`
175 For which parameter to get the value
176
177 Returns
178 -------
179 result : dict
180 Dict with the various parameter estimation values.
181
182 """
183 scan_values = parameter.scan_values
184
185 if not np.any(datasets.contributes_to_stat):
186 return {
187 f"{parameter.name}_scan": scan_values,
188 "stat_scan": scan_values * np.nan
189 }
190
191 self.fit.optimize(datasets=datasets)
192
193 profile = self.fit.stat_profile(
194 datasets=datasets,
195 parameter=parameter,
196 reoptimize=self.reoptimize
197 )
198
199 return {
200 f"{parameter.name}_scan": scan_values,
201 "stat_scan": profile["stat_scan"],
202 }
203
204 def estimate_ul(self, datasets, parameter):
205 """Estimate parameter ul.
206
207 Parameters
208 ----------
209 datasets : `~gammapy.datasets.Datasets`
210 The datasets used to estimate the model parameter
211 parameter : `Parameter`
212 For which parameter to get the value
213
214 Returns
215 -------
216 result : dict
217 Dict with the various parameter estimation values.
218
219 """
220 if not np.any(datasets.contributes_to_stat):
221 return {f"{parameter.name}_ul": np.nan}
222
223 self.fit.optimize(datasets=datasets)
224
225 res = self.fit.confidence(
226 datasets=datasets,
227 parameter=parameter,
228 sigma=self.n_sigma_ul,
229 reoptimize=self.reoptimize
230 )
231 return {f"{parameter.name}_ul": res["errp"] + parameter.value}
232
233 @staticmethod
234 def estimate_counts(datasets):
235 """Estimate counts for the flux point.
236
237 Parameters
238 ----------
239 datasets : Datasets
240 Datasets
241
242 Returns
243 -------
244 result : dict
245 Dict with an array with one entry per dataset with the sum of the
246 masked counts.
247 """
248 counts = []
249
250 for dataset in datasets:
251 mask = dataset.mask
252 counts.append(dataset.counts.data[mask].sum())
253
254 return {"counts": np.array(counts, dtype=int), "datasets": datasets.names}
255
256 @staticmethod
257 def estimate_npred(datasets):
258 """Estimate npred for the flux point.
259
260 Parameters
261 ----------
262 datasets : Datasets
263 Datasets
264
265 Returns
266 -------
267 result : dict
268 Dict with an array with one entry per dataset with the sum of the
269 masked npred.
270 """
271 npred = []
272
273 for dataset in datasets:
274 mask = dataset.mask
275 npred.append(dataset.npred().data[mask].sum())
276
277 return {"npred": np.array(npred), "datasets": datasets.names}
278
279 def run(self, datasets, parameter):
280 """Run the parameter estimator.
281
282 Parameters
283 ----------
284 datasets : `~gammapy.datasets.Datasets`
285 The datasets used to estimate the model parameter
286 parameter : `str` or `Parameter`
287 For which parameter to run the estimator
288
289 Returns
290 -------
291 result : dict
292 Dict with the various parameter estimation values.
293 """
294 datasets = Datasets(datasets)
295 parameter = datasets.parameters[parameter]
296
297 with datasets.parameters.restore_status():
298
299 if not self.reoptimize:
300 datasets.parameters.freeze_all()
301 parameter.frozen = False
302
303 result = self.estimate_best_fit(datasets, parameter)
304 result.update(self.estimate_ts(datasets, parameter))
305
306 if "errn-errp" in self.selection_optional:
307 result.update(self.estimate_errn_errp(datasets, parameter))
308
309 if "ul" in self.selection_optional:
310 result.update(self.estimate_ul(datasets, parameter))
311
312 if "scan" in self.selection_optional:
313 result.update(self.estimate_scan(datasets, parameter))
314
315 result.update(self.estimate_counts(datasets))
316 return result
317
[end of gammapy/estimators/parameter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gammapy/estimators/parameter.py b/gammapy/estimators/parameter.py
--- a/gammapy/estimators/parameter.py
+++ b/gammapy/estimators/parameter.py
@@ -74,7 +74,12 @@
Returns
-------
result : dict
- Dict with the various parameter estimation values.
+ Dict with the various parameter estimation values. Entries are:
+
+ * parameter.name: best fit parameter value
+ * "stat": best fit total stat.
+ * "success": boolean flag for fit success
+ * parameter.name_err: covariance-based error estimate on parameter value
"""
value, total_stat, success, error = np.nan, 0, False, np.nan
@@ -104,14 +109,20 @@
Returns
-------
result : dict
- Dict with the various parameter estimation values.
- """
- if not np.any(datasets.contributes_to_stat):
- return {"ts": np.nan}
+ Dict with the TS of the best fit value compared to the null hypothesis. Entries are:
- stat = datasets.stat_sum()
+ * TS : fit statistic difference with null hypothesis
+ * "npred" : predicted number of counts per dataset
+ * "npred_null" : predicted number of counts per dataset in the null hypothesis
+ """
npred = self.estimate_npred(datasets=datasets)
+ if not np.any(datasets.contributes_to_stat):
+ stat = np.nan
+ npred["npred"][...] = np.nan
+ else:
+ stat = datasets.stat_sum()
+
with datasets.parameters.restore_status():
# compute ts value
parameter.value = self.null_value
@@ -142,7 +153,10 @@
Returns
-------
result : dict
- Dict with the various parameter estimation values.
+ Dict with the parameter asymmetric errors. Entries are:
+
+ * parameter.name_errp : positive error on parameter value
+ * parameter.name_errn : negative error on parameter value
"""
if not np.any(datasets.contributes_to_stat):
return {
@@ -177,8 +191,10 @@
Returns
-------
result : dict
- Dict with the various parameter estimation values.
+ Dict with the parameter fit scan values. Entries are:
+ * parameter.name_scan : parameter values scan
+ * "stat_scan" : fit statistic values scan
"""
scan_values = parameter.scan_values
@@ -214,8 +230,9 @@
Returns
-------
result : dict
- Dict with the various parameter estimation values.
-
+ Dict with the parameter ULs. Entries are:
+
+ * parameter.name_ul : upper limit on parameter value
"""
if not np.any(datasets.contributes_to_stat):
return {f"{parameter.name}_ul": np.nan}
| {"golden_diff": "diff --git a/gammapy/estimators/parameter.py b/gammapy/estimators/parameter.py\n--- a/gammapy/estimators/parameter.py\n+++ b/gammapy/estimators/parameter.py\n@@ -74,7 +74,12 @@\n Returns\n -------\n result : dict\n- Dict with the various parameter estimation values.\n+ Dict with the various parameter estimation values. Entries are:\n+\n+ * parameter.name: best fit parameter value\n+ * \"stat\": best fit total stat.\n+ * \"success\": boolean flag for fit success\n+ * parameter.name_err: covariance-based error estimate on parameter value\n \"\"\"\n value, total_stat, success, error = np.nan, 0, False, np.nan\n \n@@ -104,14 +109,20 @@\n Returns\n -------\n result : dict\n- Dict with the various parameter estimation values.\n- \"\"\"\n- if not np.any(datasets.contributes_to_stat):\n- return {\"ts\": np.nan}\n+ Dict with the TS of the best fit value compared to the null hypothesis. Entries are:\n \n- stat = datasets.stat_sum()\n+ * TS : fit statistic difference with null hypothesis\n+ * \"npred\" : predicted number of counts per dataset\n+ * \"npred_null\" : predicted number of counts per dataset in the null hypothesis\n+ \"\"\"\n npred = self.estimate_npred(datasets=datasets)\n \n+ if not np.any(datasets.contributes_to_stat):\n+ stat = np.nan\n+ npred[\"npred\"][...] = np.nan\n+ else:\n+ stat = datasets.stat_sum()\n+\n with datasets.parameters.restore_status():\n # compute ts value\n parameter.value = self.null_value\n@@ -142,7 +153,10 @@\n Returns\n -------\n result : dict\n- Dict with the various parameter estimation values.\n+ Dict with the parameter asymmetric errors. Entries are:\n+\n+ * parameter.name_errp : positive error on parameter value\n+ * parameter.name_errn : negative error on parameter value\n \"\"\"\n if not np.any(datasets.contributes_to_stat):\n return {\n@@ -177,8 +191,10 @@\n Returns\n -------\n result : dict\n- Dict with the various parameter estimation values.\n+ Dict with the parameter fit scan values. Entries are:\n \n+ * parameter.name_scan : parameter values scan\n+ * \"stat_scan\" : fit statistic values scan\n \"\"\"\n scan_values = parameter.scan_values\n \n@@ -214,8 +230,9 @@\n Returns\n -------\n result : dict\n- Dict with the various parameter estimation values.\n-\n+ Dict with the parameter ULs. Entries are:\n+ \n+ * parameter.name_ul : upper limit on parameter value\n \"\"\"\n if not np.any(datasets.contributes_to_stat):\n return {f\"{parameter.name}_ul\": np.nan}\n", "issue": "LightCurveEstimator fails if no dataset contribute in a time bin\n**Gammapy version**\r\ndev\r\n\r\n**Bug description**\r\nIf you run the `LightCurveEstimator` and no dataset are found to contribute in a time interval, it will fail because the `FluxPoints` produced has no `npred` quantity attached. \r\nThis is because the `ParameterEstimator.estimate_ts` only return `ts` as `nan` before estimating the `npred`. \r\nhttps://github.com/gammapy/gammapy/blob/d1ede2663429253b72011b9eda256ed2b48f8e9c/gammapy/estimators/parameter.py#L110\r\n\r\n\r\n**Expected behavior**\r\nThe estimator should run and return `nan` for `npred` as well.\r\n\r\n**To Reproduce**\r\nIf you want to test, use the light curve validation, script and change the energy range to e.g. 
0.5-0.7 TeV in line https://github.com/gammapy/gammapy-benchmarks/blob/8d02a809583a8ddf3d222c5b7db2b542c7f2addf/validation/lightcurve/make.py#L108\r\n\r\nit will yield:\r\n```\r\nTraceback (most recent call last):\r\n File \"make.py\", line 278, in <module>\r\n cli()\r\n File \"/Users/terrier/Code/anaconda3/envs/gammapy-dev/lib/python3.7/site-packages/click/core.py\", line 1137, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/Users/terrier/Code/anaconda3/envs/gammapy-dev/lib/python3.7/site-packages/click/core.py\", line 1062, in main\r\n rv = self.invoke(ctx)\r\n File \"/Users/terrier/Code/anaconda3/envs/gammapy-dev/lib/python3.7/site-packages/click/core.py\", line 1668, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/Users/terrier/Code/anaconda3/envs/gammapy-dev/lib/python3.7/site-packages/click/core.py\", line 1404, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/Users/terrier/Code/anaconda3/envs/gammapy-dev/lib/python3.7/site-packages/click/core.py\", line 763, in invoke\r\n return __callback(*args, **kwargs)\r\n File \"make.py\", line 69, in run_analyses\r\n perform_analysis(analysis_type, short_observations, target_position, time_intervals)\r\n File \"make.py\", line 114, in perform_analysis\r\n lc = lc_maker.run(datasets)\r\n File \"/Users/terrier/Code/gammapy-dev/gammapy/gammapy/estimators/lightcurve.py\", line 126, in run\r\n fp._data[name], dataset_names=datasets.names\r\n File \"/Users/terrier/Code/gammapy-dev/gammapy/gammapy/maps/maps.py\", line 46, in __getitem__\r\n return self._data[key]\r\nKeyError: 'npred'\r\n```\r\n\r\n**Other information**\r\nAny other information you think will be useful for us to fix the issue can go here.\r\n\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport logging\nimport numpy as np\nfrom gammapy.datasets import Datasets\nfrom gammapy.modeling import Fit\nfrom .core import Estimator\n\nlog = logging.getLogger(__name__)\n\n\nclass ParameterEstimator(Estimator):\n \"\"\"Model parameter estimator.\n\n Estimates a model parameter for a group of datasets. Compute best fit value,\n symmetric and delta TS for a given null value. Additionally asymmetric errors\n as well as parameter upper limit and fit statistic profile can be estimated.\n\n Parameters\n ----------\n n_sigma : int\n Sigma to use for asymmetric error computation. Default is 1.\n n_sigma_ul : int\n Sigma to use for upper limit computation. Default is 2.\n null_value : float\n Which null value to use for the parameter\n selection_optional : list of str\n Which additional quantities to estimate. Available options are:\n\n * \"all\": all the optional steps are executed\n * \"errn-errp\": estimate asymmetric errors on parameter best fit value.\n * \"ul\": estimate upper limits.\n * \"scan\": estimate fit statistic profiles.\n\n Default is None so the optionnal steps are not executed.\n fit : `Fit`\n Fit instance specifying the backend and fit options.\n reoptimize : bool\n Re-optimize other free model parameters. 
Default is True.\n \"\"\"\n\n tag = \"ParameterEstimator\"\n _available_selection_optional = [\"errn-errp\", \"ul\", \"scan\"]\n\n def __init__(\n self,\n n_sigma=1,\n n_sigma_ul=2,\n null_value=1e-150,\n selection_optional=None,\n fit=None,\n reoptimize=True\n ):\n self.n_sigma = n_sigma\n self.n_sigma_ul = n_sigma_ul\n self.null_value = null_value\n self.selection_optional = selection_optional\n\n if fit is None:\n fit = Fit()\n\n self.fit = fit\n self.reoptimize = reoptimize\n\n def estimate_best_fit(self, datasets, parameter):\n \"\"\"Estimate parameter assymetric errors\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n Datasets\n parameter : `Parameter`\n For which parameter to get the value\n\n Returns\n -------\n result : dict\n Dict with the various parameter estimation values.\n \"\"\"\n value, total_stat, success, error = np.nan, 0, False, np.nan\n\n if np.any(datasets.contributes_to_stat):\n result = self.fit.run(datasets=datasets)\n value, error = parameter.value, parameter.error\n total_stat = result[\"optimize_result\"].total_stat\n success = result[\"optimize_result\"].success\n\n return {\n f\"{parameter.name}\": value,\n \"stat\": total_stat,\n \"success\": success,\n f\"{parameter.name}_err\": error * self.n_sigma,\n }\n\n def estimate_ts(self, datasets, parameter):\n \"\"\"Estimate parameter ts\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n Datasets\n parameter : `Parameter`\n For which parameter to get the value\n\n Returns\n -------\n result : dict\n Dict with the various parameter estimation values.\n \"\"\"\n if not np.any(datasets.contributes_to_stat):\n return {\"ts\": np.nan}\n\n stat = datasets.stat_sum()\n npred = self.estimate_npred(datasets=datasets)\n\n with datasets.parameters.restore_status():\n # compute ts value\n parameter.value = self.null_value\n\n if self.reoptimize:\n parameter.frozen = True\n _ = self.fit.optimize(datasets=datasets)\n\n ts = datasets.stat_sum() - stat\n npred_null = self.estimate_npred(datasets=datasets)\n\n return {\n \"ts\": ts,\n \"npred\": npred[\"npred\"],\n \"npred_null\": npred_null[\"npred\"]\n }\n\n def estimate_errn_errp(self, datasets, parameter):\n \"\"\"Estimate parameter assymetric errors\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n Datasets\n parameter : `Parameter`\n For which parameter to get the value\n\n Returns\n -------\n result : dict\n Dict with the various parameter estimation values.\n \"\"\"\n if not np.any(datasets.contributes_to_stat):\n return {\n f\"{parameter.name}_errp\": np.nan,\n f\"{parameter.name}_errn\": np.nan,\n }\n\n self.fit.optimize(datasets=datasets)\n\n res = self.fit.confidence(\n datasets=datasets,\n parameter=parameter,\n sigma=self.n_sigma,\n reoptimize=self.reoptimize\n )\n\n return {\n f\"{parameter.name}_errp\": res[\"errp\"],\n f\"{parameter.name}_errn\": res[\"errn\"],\n }\n\n def estimate_scan(self, datasets, parameter):\n \"\"\"Estimate parameter stat scan.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n The datasets used to estimate the model parameter\n parameter : `Parameter`\n For which parameter to get the value\n\n Returns\n -------\n result : dict\n Dict with the various parameter estimation values.\n\n \"\"\"\n scan_values = parameter.scan_values\n\n if not np.any(datasets.contributes_to_stat):\n return {\n f\"{parameter.name}_scan\": scan_values,\n \"stat_scan\": scan_values * np.nan\n }\n\n self.fit.optimize(datasets=datasets)\n\n profile = self.fit.stat_profile(\n 
datasets=datasets,\n parameter=parameter,\n reoptimize=self.reoptimize\n )\n\n return {\n f\"{parameter.name}_scan\": scan_values,\n \"stat_scan\": profile[\"stat_scan\"],\n }\n\n def estimate_ul(self, datasets, parameter):\n \"\"\"Estimate parameter ul.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n The datasets used to estimate the model parameter\n parameter : `Parameter`\n For which parameter to get the value\n\n Returns\n -------\n result : dict\n Dict with the various parameter estimation values.\n\n \"\"\"\n if not np.any(datasets.contributes_to_stat):\n return {f\"{parameter.name}_ul\": np.nan}\n\n self.fit.optimize(datasets=datasets)\n\n res = self.fit.confidence(\n datasets=datasets,\n parameter=parameter,\n sigma=self.n_sigma_ul,\n reoptimize=self.reoptimize\n )\n return {f\"{parameter.name}_ul\": res[\"errp\"] + parameter.value}\n\n @staticmethod\n def estimate_counts(datasets):\n \"\"\"Estimate counts for the flux point.\n\n Parameters\n ----------\n datasets : Datasets\n Datasets\n\n Returns\n -------\n result : dict\n Dict with an array with one entry per dataset with the sum of the\n masked counts.\n \"\"\"\n counts = []\n\n for dataset in datasets:\n mask = dataset.mask\n counts.append(dataset.counts.data[mask].sum())\n\n return {\"counts\": np.array(counts, dtype=int), \"datasets\": datasets.names}\n\n @staticmethod\n def estimate_npred(datasets):\n \"\"\"Estimate npred for the flux point.\n\n Parameters\n ----------\n datasets : Datasets\n Datasets\n\n Returns\n -------\n result : dict\n Dict with an array with one entry per dataset with the sum of the\n masked npred.\n \"\"\"\n npred = []\n\n for dataset in datasets:\n mask = dataset.mask\n npred.append(dataset.npred().data[mask].sum())\n\n return {\"npred\": np.array(npred), \"datasets\": datasets.names}\n\n def run(self, datasets, parameter):\n \"\"\"Run the parameter estimator.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n The datasets used to estimate the model parameter\n parameter : `str` or `Parameter`\n For which parameter to run the estimator\n\n Returns\n -------\n result : dict\n Dict with the various parameter estimation values.\n \"\"\"\n datasets = Datasets(datasets)\n parameter = datasets.parameters[parameter]\n\n with datasets.parameters.restore_status():\n\n if not self.reoptimize:\n datasets.parameters.freeze_all()\n parameter.frozen = False\n\n result = self.estimate_best_fit(datasets, parameter)\n result.update(self.estimate_ts(datasets, parameter))\n\n if \"errn-errp\" in self.selection_optional:\n result.update(self.estimate_errn_errp(datasets, parameter))\n\n if \"ul\" in self.selection_optional:\n result.update(self.estimate_ul(datasets, parameter))\n\n if \"scan\" in self.selection_optional:\n result.update(self.estimate_scan(datasets, parameter))\n\n result.update(self.estimate_counts(datasets))\n return result\n", "path": "gammapy/estimators/parameter.py"}]} | 4,082 | 658 |
gh_patches_debug_25006 | rasdani/github-patches | git_diff | PrefectHQ__prefect-11253 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Windows prefect deploy does not work with docker
### First check
- [X] I added a descriptive title to this issue.
- [X] I used the GitHub search to find a similar issue and didn't find it.
- [X] I searched the Prefect documentation for this issue.
- [X] I checked that this issue is related to Prefect and not one of its dependencies.
### Bug summary
prefect deploy -n "name of deployment" does not work anymore on Windows.
### Reproduction
```python3
prefect deploy -n
```
### Error
```python3
Running deployment build steps...
> Running run_shell_script step...
> Running build_docker_image step...
Unable to load step function: prefect_docker.deployments.steps.build_docker_image. Attempting install of prefect-docker>=0.3.11.
Traceback (most recent call last):
File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\deployments\steps\core.py", line 149, in run_steps
step_output = await run_step(step, upstream_outputs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\deployments\steps\core.py", line 119, in run_step
step_func = _get_function_for_step(fqn, requires=keywords.get("requires"))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\deployments\steps\core.py", line 83, in _get_function_for_step
subprocess.check_call(
File "C:\Users\JKANG1\AppData\Local\Programs\Python\Python311\Lib\subprocess.py", line 408, in check_call
retcode = call(*popenargs, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\JKANG1\AppData\Local\Programs\Python\Python311\Lib\subprocess.py", line 389, in call
with Popen(*popenargs, **kwargs) as p:
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\JKANG1\AppData\Local\Programs\Python\Python311\Lib\subprocess.py", line 1026, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "C:\Users\JKANG1\AppData\Local\Programs\Python\Python311\Lib\subprocess.py", line 1538, in _execute_child
hp, ht, pid, tid = _winapi.CreateProcess(executable, args,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
FileNotFoundError: [WinError 2] The system cannot find the file specified
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\cli\_utilities.py", line 41, in wrapper
return fn(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^
File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\utilities\asyncutils.py", line 255, in coroutine_wrapper
return call()
^^^^^^
File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\_internal\concurrency\calls.py", line 382, in __call__
return self.result()
^^^^^^^^^^^^^
File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\_internal\concurrency\calls.py", line 282, in result
return self.future.result(timeout=timeout)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\_internal\concurrency\calls.py", line 168, in result
return self.__get_result()
^^^^^^^^^^^^^^^^^^^
File "C:\Users\JKANG1\AppData\Local\Programs\Python\Python311\Lib\concurrent\futures\_base.py", line 401, in __get_result
raise self._exception
File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\_internal\concurrency\calls.py", line 345, in _run_async
result = await coro
^^^^^^^^^^
File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\cli\deploy.py", line 292, in deploy
await _run_single_deploy(
File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\client\utilities.py", line 51, in with_injected_client
return await fn(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\cli\deploy.py", line 595, in _run_single_deploy
await run_steps(build_steps, step_outputs, print_function=app.console.print)
File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\deployments\steps\core.py", line 177, in run_steps
raise StepExecutionError(f"Encountered error while running {fqn}") from exc
prefect.deployments.steps.core.StepExecutionError: Encountered error while running prefect_docker.deployments.steps.build_docker_image
An exception occurred.
```
### Versions
```Text
Version: 2.14.4
API version: 0.8.4
Python version: 3.11.6
Git commit: d2cf30f4
Built: Thu, Nov 9, 2023 4:48 PM
OS/Arch: win32/AMD64
Profile: default
Server type: cloud
```
### Additional context
_No response_
</issue>
<code>
[start of src/prefect/deployments/steps/core.py]
1 """
2 Core primitives for running Prefect project steps.
3
4 Project steps are YAML representations of Python functions along with their inputs.
5
6 Whenever a step is run, the following actions are taken:
7
8 - The step's inputs and block / variable references are resolved (see [the projects concepts documentation](/concepts/projects/#templating-options) for more details)
9 - The step's function is imported; if it cannot be found, the `requires` keyword is used to install the necessary packages
10 - The step's function is called with the resolved inputs
11 - The step's output is returned and used to resolve inputs for subsequent steps
12 """
13 import os
14 import re
15 import subprocess
16 import warnings
17 from copy import deepcopy
18 from importlib import import_module
19 from typing import Any, Dict, List, Optional, Tuple, Union
20
21 from prefect._internal.compatibility.deprecated import PrefectDeprecationWarning
22 from prefect._internal.concurrency.api import Call, from_async
23 from prefect.logging.loggers import get_logger
24 from prefect.settings import PREFECT_DEBUG_MODE
25 from prefect.utilities.importtools import import_object
26 from prefect.utilities.processutils import get_sys_executable
27 from prefect.utilities.templating import (
28 apply_values,
29 resolve_block_document_references,
30 resolve_variables,
31 )
32
33 RESERVED_KEYWORDS = {"id", "requires"}
34
35
36 class StepExecutionError(Exception):
37 """
38 Raised when a step fails to execute.
39 """
40
41
42 def _strip_version(requirement: str) -> str:
43 """
44 Strips the version from a requirement string.
45
46 Args:
47 requirement: A requirement string, e.g. "requests>=2.0.0"
48
49 Returns:
50 The package name, e.g. "requests"
51
52 Examples:
53 ```python
54 >>> _strip_version("s3fs>=2.0.0<3.0.0")
55 "s3fs"
56 ```
57 """
58 # split on any of the characters in the set [<>=!~]
59 # and return the first element which will be the package name
60 return re.split(r"[<>=!~]", requirement)[0].strip()
61
62
63 def _get_function_for_step(
64 fully_qualified_name: str, requires: Union[str, List[str], None] = None
65 ):
66 if not isinstance(requires, list):
67 packages = [requires] if requires else []
68 else:
69 packages = requires
70
71 try:
72 for package in packages:
73 import_module(_strip_version(package).replace("-", "_"))
74 step_func = import_object(fully_qualified_name)
75 return step_func
76 except ImportError:
77 if requires:
78 print(
79 f"Unable to load step function: {fully_qualified_name}. Attempting"
80 f" install of {requires}."
81 )
82 else:
83 raise
84
85 try:
86 subprocess.check_call(
87 [get_sys_executable(), "-m", "pip", "install", *packages],
88 )
89 except subprocess.CalledProcessError:
90 get_logger("deployments.steps.core").warning(
91 "Unable to install required packages for %s", fully_qualified_name
92 )
93 step_func = import_object(fully_qualified_name)
94 return step_func
95
96
97 async def run_step(step: Dict, upstream_outputs: Optional[Dict] = None) -> Dict:
98 """
99 Runs a step, returns the step's output.
100
101 Steps are assumed to be in the format `{"importable.func.name": {"kwarg1": "value1", ...}}`.
102
103     The 'id' and 'requires' keywords are reserved for specific purposes and will be removed from the
104 inputs before passing to the step function:
105
106 This keyword is used to specify packages that should be installed before running the step.
107 """
108 fqn, inputs = _get_step_fully_qualified_name_and_inputs(step)
109 upstream_outputs = upstream_outputs or {}
110
111 if len(step.keys()) > 1:
112 raise ValueError(
113 f"Step has unexpected additional keys: {', '.join(list(step.keys())[1:])}"
114 )
115
116 keywords = {
117 keyword: inputs.pop(keyword)
118 for keyword in RESERVED_KEYWORDS
119 if keyword in inputs
120 }
121
122 inputs = apply_values(inputs, upstream_outputs)
123 inputs = await resolve_block_document_references(inputs)
124 inputs = await resolve_variables(inputs)
125 inputs = apply_values(inputs, os.environ)
126 step_func = _get_function_for_step(fqn, requires=keywords.get("requires"))
127 result = await from_async.call_soon_in_new_thread(
128 Call.new(step_func, **inputs)
129 ).aresult()
130 return result
131
132
133 async def run_steps(
134 steps: List[Dict[str, Any]],
135 upstream_outputs: Optional[Dict[str, Any]] = None,
136 print_function: Any = print,
137 ):
138 upstream_outputs = deepcopy(upstream_outputs) if upstream_outputs else {}
139 for step in steps:
140 if not step:
141 continue
142 fqn, inputs = _get_step_fully_qualified_name_and_inputs(step)
143 step_name = fqn.split(".")[-1]
144 print_function(f" > Running {step_name} step...")
145 try:
146 # catch warnings to ensure deprecation warnings are printed
147 with warnings.catch_warnings(record=True) as w:
148 warnings.simplefilter(
149 "always",
150 category=PrefectDeprecationWarning,
151 )
152 warnings.simplefilter(
153 "always",
154 category=DeprecationWarning,
155 )
156 step_output = await run_step(step, upstream_outputs)
157 if w:
158 printed_messages = []
159 for warning in w:
160 message = str(warning.message)
161 # prevent duplicate warnings from being printed
162 if message not in printed_messages:
163 try:
164 # try using rich styling
165 print_function(message, style="yellow")
166 except Exception:
167 # default to printing without styling
168 print_function(message)
169 printed_messages.append(message)
170
171 if not isinstance(step_output, dict):
172 if PREFECT_DEBUG_MODE:
173 get_logger().warning(
174 "Step function %s returned unexpected type: %s",
175 fqn,
176 type(step_output),
177 )
178 continue
179 # store step output under step id to prevent clobbering
180 if inputs.get("id"):
181 upstream_outputs[inputs.get("id")] = step_output
182 upstream_outputs.update(step_output)
183 except Exception as exc:
184 raise StepExecutionError(f"Encountered error while running {fqn}") from exc
185 return upstream_outputs
186
187
188 def _get_step_fully_qualified_name_and_inputs(step: Dict) -> Tuple[str, Dict]:
189 step = deepcopy(step)
190 return step.popitem()
191
[end of src/prefect/deployments/steps/core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/prefect/deployments/steps/core.py b/src/prefect/deployments/steps/core.py
--- a/src/prefect/deployments/steps/core.py
+++ b/src/prefect/deployments/steps/core.py
@@ -13,6 +13,7 @@
import os
import re
import subprocess
+import sys
import warnings
from copy import deepcopy
from importlib import import_module
@@ -23,7 +24,6 @@
from prefect.logging.loggers import get_logger
from prefect.settings import PREFECT_DEBUG_MODE
from prefect.utilities.importtools import import_object
-from prefect.utilities.processutils import get_sys_executable
from prefect.utilities.templating import (
apply_values,
resolve_block_document_references,
@@ -83,9 +83,7 @@
raise
try:
- subprocess.check_call(
- [get_sys_executable(), "-m", "pip", "install", *packages],
- )
+ subprocess.check_call([sys.executable, "-m", "pip", "install", *packages])
except subprocess.CalledProcessError:
get_logger("deployments.steps.core").warning(
"Unable to install required packages for %s", fully_qualified_name
| {"golden_diff": "diff --git a/src/prefect/deployments/steps/core.py b/src/prefect/deployments/steps/core.py\n--- a/src/prefect/deployments/steps/core.py\n+++ b/src/prefect/deployments/steps/core.py\n@@ -13,6 +13,7 @@\n import os\n import re\n import subprocess\n+import sys\n import warnings\n from copy import deepcopy\n from importlib import import_module\n@@ -23,7 +24,6 @@\n from prefect.logging.loggers import get_logger\n from prefect.settings import PREFECT_DEBUG_MODE\n from prefect.utilities.importtools import import_object\n-from prefect.utilities.processutils import get_sys_executable\n from prefect.utilities.templating import (\n apply_values,\n resolve_block_document_references,\n@@ -83,9 +83,7 @@\n raise\n \n try:\n- subprocess.check_call(\n- [get_sys_executable(), \"-m\", \"pip\", \"install\", *packages],\n- )\n+ subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *packages])\n except subprocess.CalledProcessError:\n get_logger(\"deployments.steps.core\").warning(\n \"Unable to install required packages for %s\", fully_qualified_name\n", "issue": "Windows prefect deploy does not work with docker\n### First check\n\n- [X] I added a descriptive title to this issue.\n- [X] I used the GitHub search to find a similar issue and didn't find it.\n- [X] I searched the Prefect documentation for this issue.\n- [X] I checked that this issue is related to Prefect and not one of its dependencies.\n\n### Bug summary\n\nprefect deploy -n \"name of deployment\" does not work anymore on Windows. \n\n### Reproduction\n\n```python3\nprefect deploy -n\n```\n\n\n### Error\n\n```python3\nRunning deployment build steps...\r\n > Running run_shell_script step...\r\n > Running build_docker_image step...\r\nUnable to load step function: prefect_docker.deployments.steps.build_docker_image. 
Attempting install of prefect-docker>=0.3.11.\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\deployments\\steps\\core.py\", line 149, in run_steps\r\n step_output = await run_step(step, upstream_outputs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\deployments\\steps\\core.py\", line 119, in run_step\r\n step_func = _get_function_for_step(fqn, requires=keywords.get(\"requires\"))\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\deployments\\steps\\core.py\", line 83, in _get_function_for_step\r\n subprocess.check_call(\r\n File \"C:\\Users\\JKANG1\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\subprocess.py\", line 408, in check_call\r\n retcode = call(*popenargs, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"C:\\Users\\JKANG1\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\subprocess.py\", line 389, in call\r\n with Popen(*popenargs, **kwargs) as p:\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"C:\\Users\\JKANG1\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\subprocess.py\", line 1026, in __init__\r\n self._execute_child(args, executable, preexec_fn, close_fds,\r\n File \"C:\\Users\\JKANG1\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\subprocess.py\", line 1538, in _execute_child\r\n hp, ht, pid, tid = _winapi.CreateProcess(executable, args,\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nFileNotFoundError: [WinError 2] The system cannot find the file specified\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\cli\\_utilities.py\", line 41, in wrapper\r\n return fn(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\utilities\\asyncutils.py\", line 255, in coroutine_wrapper\r\n return call()\r\n ^^^^^^\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\_internal\\concurrency\\calls.py\", line 382, in __call__\r\n return self.result()\r\n ^^^^^^^^^^^^^\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\_internal\\concurrency\\calls.py\", line 282, in result\r\n return self.future.result(timeout=timeout)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\_internal\\concurrency\\calls.py\", line 168, in result\r\n return self.__get_result()\r\n ^^^^^^^^^^^^^^^^^^^\r\n File \"C:\\Users\\JKANG1\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\concurrent\\futures\\_base.py\", line 401, in __get_result\r\n raise self._exception\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\_internal\\concurrency\\calls.py\", line 345, in _run_async\r\n result = await coro\r\n ^^^^^^^^^^\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\cli\\deploy.py\", line 292, in deploy\r\n await _run_single_deploy(\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\client\\utilities.py\", line 51, in with_injected_client\r\n 
return await fn(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\cli\\deploy.py\", line 595, in _run_single_deploy\r\n await run_steps(build_steps, step_outputs, print_function=app.console.print)\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\deployments\\steps\\core.py\", line 177, in run_steps\r\n raise StepExecutionError(f\"Encountered error while running {fqn}\") from exc\r\nprefect.deployments.steps.core.StepExecutionError: Encountered error while running prefect_docker.deployments.steps.build_docker_image\r\nAn exception occurred.\n```\n\n\n### Versions\n\n```Text\nVersion: 2.14.4\r\nAPI version: 0.8.4\r\nPython version: 3.11.6\r\nGit commit: d2cf30f4\r\nBuilt: Thu, Nov 9, 2023 4:48 PM\r\nOS/Arch: win32/AMD64\r\nProfile: default\r\nServer type: cloud\n```\n\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "\"\"\"\nCore primitives for running Prefect project steps.\n\nProject steps are YAML representations of Python functions along with their inputs.\n\nWhenever a step is run, the following actions are taken:\n\n- The step's inputs and block / variable references are resolved (see [the projects concepts documentation](/concepts/projects/#templating-options) for more details)\n- The step's function is imported; if it cannot be found, the `requires` keyword is used to install the necessary packages\n- The step's function is called with the resolved inputs\n- The step's output is returned and used to resolve inputs for subsequent steps\n\"\"\"\nimport os\nimport re\nimport subprocess\nimport warnings\nfrom copy import deepcopy\nfrom importlib import import_module\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nfrom prefect._internal.compatibility.deprecated import PrefectDeprecationWarning\nfrom prefect._internal.concurrency.api import Call, from_async\nfrom prefect.logging.loggers import get_logger\nfrom prefect.settings import PREFECT_DEBUG_MODE\nfrom prefect.utilities.importtools import import_object\nfrom prefect.utilities.processutils import get_sys_executable\nfrom prefect.utilities.templating import (\n apply_values,\n resolve_block_document_references,\n resolve_variables,\n)\n\nRESERVED_KEYWORDS = {\"id\", \"requires\"}\n\n\nclass StepExecutionError(Exception):\n \"\"\"\n Raised when a step fails to execute.\n \"\"\"\n\n\ndef _strip_version(requirement: str) -> str:\n \"\"\"\n Strips the version from a requirement string.\n\n Args:\n requirement: A requirement string, e.g. \"requests>=2.0.0\"\n\n Returns:\n The package name, e.g. \"requests\"\n\n Examples:\n ```python\n >>> _strip_version(\"s3fs>=2.0.0<3.0.0\")\n \"s3fs\"\n ```\n \"\"\"\n # split on any of the characters in the set [<>=!~]\n # and return the first element which will be the package name\n return re.split(r\"[<>=!~]\", requirement)[0].strip()\n\n\ndef _get_function_for_step(\n fully_qualified_name: str, requires: Union[str, List[str], None] = None\n):\n if not isinstance(requires, list):\n packages = [requires] if requires else []\n else:\n packages = requires\n\n try:\n for package in packages:\n import_module(_strip_version(package).replace(\"-\", \"_\"))\n step_func = import_object(fully_qualified_name)\n return step_func\n except ImportError:\n if requires:\n print(\n f\"Unable to load step function: {fully_qualified_name}. 
Attempting\"\n f\" install of {requires}.\"\n )\n else:\n raise\n\n try:\n subprocess.check_call(\n [get_sys_executable(), \"-m\", \"pip\", \"install\", *packages],\n )\n except subprocess.CalledProcessError:\n get_logger(\"deployments.steps.core\").warning(\n \"Unable to install required packages for %s\", fully_qualified_name\n )\n step_func = import_object(fully_qualified_name)\n return step_func\n\n\nasync def run_step(step: Dict, upstream_outputs: Optional[Dict] = None) -> Dict:\n \"\"\"\n Runs a step, returns the step's output.\n\n Steps are assumed to be in the format `{\"importable.func.name\": {\"kwarg1\": \"value1\", ...}}`.\n\n The 'id and 'requires' keywords are reserved for specific purposes and will be removed from the\n inputs before passing to the step function:\n\n This keyword is used to specify packages that should be installed before running the step.\n \"\"\"\n fqn, inputs = _get_step_fully_qualified_name_and_inputs(step)\n upstream_outputs = upstream_outputs or {}\n\n if len(step.keys()) > 1:\n raise ValueError(\n f\"Step has unexpected additional keys: {', '.join(list(step.keys())[1:])}\"\n )\n\n keywords = {\n keyword: inputs.pop(keyword)\n for keyword in RESERVED_KEYWORDS\n if keyword in inputs\n }\n\n inputs = apply_values(inputs, upstream_outputs)\n inputs = await resolve_block_document_references(inputs)\n inputs = await resolve_variables(inputs)\n inputs = apply_values(inputs, os.environ)\n step_func = _get_function_for_step(fqn, requires=keywords.get(\"requires\"))\n result = await from_async.call_soon_in_new_thread(\n Call.new(step_func, **inputs)\n ).aresult()\n return result\n\n\nasync def run_steps(\n steps: List[Dict[str, Any]],\n upstream_outputs: Optional[Dict[str, Any]] = None,\n print_function: Any = print,\n):\n upstream_outputs = deepcopy(upstream_outputs) if upstream_outputs else {}\n for step in steps:\n if not step:\n continue\n fqn, inputs = _get_step_fully_qualified_name_and_inputs(step)\n step_name = fqn.split(\".\")[-1]\n print_function(f\" > Running {step_name} step...\")\n try:\n # catch warnings to ensure deprecation warnings are printed\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\n \"always\",\n category=PrefectDeprecationWarning,\n )\n warnings.simplefilter(\n \"always\",\n category=DeprecationWarning,\n )\n step_output = await run_step(step, upstream_outputs)\n if w:\n printed_messages = []\n for warning in w:\n message = str(warning.message)\n # prevent duplicate warnings from being printed\n if message not in printed_messages:\n try:\n # try using rich styling\n print_function(message, style=\"yellow\")\n except Exception:\n # default to printing without styling\n print_function(message)\n printed_messages.append(message)\n\n if not isinstance(step_output, dict):\n if PREFECT_DEBUG_MODE:\n get_logger().warning(\n \"Step function %s returned unexpected type: %s\",\n fqn,\n type(step_output),\n )\n continue\n # store step output under step id to prevent clobbering\n if inputs.get(\"id\"):\n upstream_outputs[inputs.get(\"id\")] = step_output\n upstream_outputs.update(step_output)\n except Exception as exc:\n raise StepExecutionError(f\"Encountered error while running {fqn}\") from exc\n return upstream_outputs\n\n\ndef _get_step_fully_qualified_name_and_inputs(step: Dict) -> Tuple[str, Dict]:\n step = deepcopy(step)\n return step.popitem()\n", "path": "src/prefect/deployments/steps/core.py"}]} | 3,879 | 268 |
gh_patches_debug_63639 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-2239 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Logged out view of list of lists is empty
This is a 🤦 on my part -- it should directly query the list of public lists, instead of trying to use the redis cache, which relies on logged in users
</issue>
<code>
[start of bookwyrm/views/list/lists.py]
1 """ book list views"""
2 from django.contrib.auth.decorators import login_required
3 from django.core.paginator import Paginator
4 from django.shortcuts import redirect
5 from django.template.response import TemplateResponse
6 from django.utils.decorators import method_decorator
7 from django.views import View
8
9 from bookwyrm import forms, models
10 from bookwyrm.lists_stream import ListsStream
11 from bookwyrm.views.helpers import get_user_from_username
12
13
14 # pylint: disable=no-self-use
15 class Lists(View):
16 """book list page"""
17
18 def get(self, request):
19 """display a book list"""
20 lists = ListsStream().get_list_stream(request.user)
21 paginated = Paginator(lists, 12)
22 data = {
23 "lists": paginated.get_page(request.GET.get("page")),
24 "list_form": forms.ListForm(),
25 "path": "/list",
26 }
27 return TemplateResponse(request, "lists/lists.html", data)
28
29 @method_decorator(login_required, name="dispatch")
30 # pylint: disable=unused-argument
31 def post(self, request):
32 """create a book_list"""
33 form = forms.ListForm(request.POST)
34 if not form.is_valid():
35 return redirect("lists")
36 book_list = form.save()
37 # list should not have a group if it is not group curated
38 if not book_list.curation == "group":
39 book_list.group = None
40 book_list.save(broadcast=False)
41
42 return redirect(book_list.local_path)
43
44
45 @method_decorator(login_required, name="dispatch")
46 class SavedLists(View):
47 """saved book list page"""
48
49 def get(self, request):
50 """display book lists"""
51 # hide lists with no approved books
52 lists = request.user.saved_lists.order_by("-updated_date")
53
54 paginated = Paginator(lists, 12)
55 data = {
56 "lists": paginated.get_page(request.GET.get("page")),
57 "list_form": forms.ListForm(),
58 "path": "/list",
59 }
60 return TemplateResponse(request, "lists/lists.html", data)
61
62
63 @method_decorator(login_required, name="dispatch")
64 class UserLists(View):
65 """a user's book list page"""
66
67 def get(self, request, username):
68 """display a book list"""
69 user = get_user_from_username(request.user, username)
70 lists = models.List.privacy_filter(request.user).filter(user=user)
71 paginated = Paginator(lists, 12)
72
73 data = {
74 "user": user,
75 "is_self": request.user.id == user.id,
76 "lists": paginated.get_page(request.GET.get("page")),
77 "list_form": forms.ListForm(),
78 "path": user.local_path + "/lists",
79 }
80 return TemplateResponse(request, "user/lists.html", data)
81
[end of bookwyrm/views/list/lists.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/views/list/lists.py b/bookwyrm/views/list/lists.py
--- a/bookwyrm/views/list/lists.py
+++ b/bookwyrm/views/list/lists.py
@@ -17,7 +17,10 @@
def get(self, request):
"""display a book list"""
- lists = ListsStream().get_list_stream(request.user)
+ if request.user.is_authenticated:
+ lists = ListsStream().get_list_stream(request.user)
+ else:
+ lists = models.List.objects.filter(privacy="public")
paginated = Paginator(lists, 12)
data = {
"lists": paginated.get_page(request.GET.get("page")),
| {"golden_diff": "diff --git a/bookwyrm/views/list/lists.py b/bookwyrm/views/list/lists.py\n--- a/bookwyrm/views/list/lists.py\n+++ b/bookwyrm/views/list/lists.py\n@@ -17,7 +17,10 @@\n \n def get(self, request):\n \"\"\"display a book list\"\"\"\n- lists = ListsStream().get_list_stream(request.user)\n+ if request.user.is_authenticated:\n+ lists = ListsStream().get_list_stream(request.user)\n+ else:\n+ lists = models.List.objects.filter(privacy=\"public\")\n paginated = Paginator(lists, 12)\n data = {\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n", "issue": "Logged out view of list of lists is empty\nThis is a \ud83e\udd26 on my part -- it should directly query the list of public lists, instead of trying to use the redis cache, which relies on logged in users\n", "before_files": [{"content": "\"\"\" book list views\"\"\"\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.lists_stream import ListsStream\nfrom bookwyrm.views.helpers import get_user_from_username\n\n\n# pylint: disable=no-self-use\nclass Lists(View):\n \"\"\"book list page\"\"\"\n\n def get(self, request):\n \"\"\"display a book list\"\"\"\n lists = ListsStream().get_list_stream(request.user)\n paginated = Paginator(lists, 12)\n data = {\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n \"list_form\": forms.ListForm(),\n \"path\": \"/list\",\n }\n return TemplateResponse(request, \"lists/lists.html\", data)\n\n @method_decorator(login_required, name=\"dispatch\")\n # pylint: disable=unused-argument\n def post(self, request):\n \"\"\"create a book_list\"\"\"\n form = forms.ListForm(request.POST)\n if not form.is_valid():\n return redirect(\"lists\")\n book_list = form.save()\n # list should not have a group if it is not group curated\n if not book_list.curation == \"group\":\n book_list.group = None\n book_list.save(broadcast=False)\n\n return redirect(book_list.local_path)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass SavedLists(View):\n \"\"\"saved book list page\"\"\"\n\n def get(self, request):\n \"\"\"display book lists\"\"\"\n # hide lists with no approved books\n lists = request.user.saved_lists.order_by(\"-updated_date\")\n\n paginated = Paginator(lists, 12)\n data = {\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n \"list_form\": forms.ListForm(),\n \"path\": \"/list\",\n }\n return TemplateResponse(request, \"lists/lists.html\", data)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass UserLists(View):\n \"\"\"a user's book list page\"\"\"\n\n def get(self, request, username):\n \"\"\"display a book list\"\"\"\n user = get_user_from_username(request.user, username)\n lists = models.List.privacy_filter(request.user).filter(user=user)\n paginated = Paginator(lists, 12)\n\n data = {\n \"user\": user,\n \"is_self\": request.user.id == user.id,\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n \"list_form\": forms.ListForm(),\n \"path\": user.local_path + \"/lists\",\n }\n return TemplateResponse(request, \"user/lists.html\", data)\n", "path": "bookwyrm/views/list/lists.py"}]} | 1,318 | 149 |
gh_patches_debug_17668 | rasdani/github-patches | git_diff | scikit-image__scikit-image-3642 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"[Errno 36] File name too long:" when using imread on remote resource with long querystring
## Description
When using `skimage.io.imread` with a remote resource, a long query string in the URL causes the read to fail: the temporary file's suffix is taken from the URL as-is, so it ends up containing the query string and the resulting file name is too long to create.
e.g.
The following works fine
```
>>> im = imread('https://c1.staticflickr.com/9/8370/8429454143_1066b73c04_o.jpg?{}'.format(''.join(['s' for i in range(100)])))
```
while the one below fails
```
>>> im = imread('https://c1.staticflickr.com/9/8370/8429454143_1066b73c04_o.jpg?{}'.format(''.join(['s' for i in range(300)])))
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/skimage/io/util.py", line 28, in file_or_url_context
with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f:
File "/usr/lib/python3.5/tempfile.py", line 688, in NamedTemporaryFile
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
File "/usr/lib/python3.5/tempfile.py", line 399, in _mkstemp_inner
fd = _os.open(file, flags, 0o600)
OSError: [Errno 36] File name too long: '/tmp/tmpmfnujlq6.jpg?ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.5/dist-packages/skimage/io/_io.py", line 61, in imread
with file_or_url_context(fname) as fname:
File "/usr/lib/python3.5/contextlib.py", line 59, in __enter__
return next(self.gen)
File "/usr/local/lib/python3.5/dist-packages/skimage/io/util.py", line 34, in file_or_url_context
os.remove(f.name)
UnboundLocalError: local variable 'f' referenced before assignment
```
## Way to reproduce
[If reporting a bug, please include the following important information:]
- [x] Code example
- [x] Operating system and version
```
DISTRIB_ID=Ubuntu
DISTRIB_RELEASE=16.04
DISTRIB_CODENAME=xenial
DISTRIB_DESCRIPTION="Ubuntu 16.04.5 LTS"
```
- [x] Python version: Python 3.5.2
- [x] scikit-image version (run `skimage.__version__`): `0.14.1`
## Proposal
https://github.com/scikit-image/scikit-image/blob/d24634d86e4f90fe96377209d66ed114b9b601e4/skimage/io/util.py#L22
The querystring should be excluded from the temporary file name extension
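A minimal sketch of that idea (illustrative only; the helper name is made up and this is not the project's actual patch): parse the URL first and take the extension from the path component, so the query string never reaches the temporary file name.
```python
import os
import urllib.parse

def _suffix_from_url(resource_name):
    # Only the path component can carry a meaningful file extension;
    # the scheme, netloc and query string are ignored.
    path = urllib.parse.urlparse(resource_name).path
    _, ext = os.path.splitext(path)
    return ext

_suffix_from_url('https://example.com/photo.jpg?token=' + 's' * 300)  # -> '.jpg'
```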
</issue>
<code>
[start of skimage/io/util.py]
1 from urllib.request import urlopen
2
3 import os
4 import re
5 import tempfile
6 from contextlib import contextmanager
7
8
9 URL_REGEX = re.compile(r'http://|https://|ftp://|file://|file:\\')
10
11
12 def is_url(filename):
13 """Return True if string is an http or ftp path."""
14 return (isinstance(filename, str) and
15 URL_REGEX.match(filename) is not None)
16
17
18 @contextmanager
19 def file_or_url_context(resource_name):
20 """Yield name of file from the given resource (i.e. file or url)."""
21 if is_url(resource_name):
22 _, ext = os.path.splitext(resource_name)
23 try:
24 with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f:
25 u = urlopen(resource_name)
26 f.write(u.read())
27 # f must be closed before yielding
28 yield f.name
29 finally:
30 os.remove(f.name)
31 else:
32 yield resource_name
33
[end of skimage/io/util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/io/util.py b/skimage/io/util.py
--- a/skimage/io/util.py
+++ b/skimage/io/util.py
@@ -1,4 +1,4 @@
-from urllib.request import urlopen
+import urllib.parse
import os
import re
@@ -19,10 +19,11 @@
def file_or_url_context(resource_name):
"""Yield name of file from the given resource (i.e. file or url)."""
if is_url(resource_name):
- _, ext = os.path.splitext(resource_name)
+ url_components = urllib.parse.urlparse(resource_name)
+ _, ext = os.path.splitext(url_components.path)
try:
with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f:
- u = urlopen(resource_name)
+ u = urllib.request.urlopen(resource_name)
f.write(u.read())
# f must be closed before yielding
yield f.name
| {"golden_diff": "diff --git a/skimage/io/util.py b/skimage/io/util.py\n--- a/skimage/io/util.py\n+++ b/skimage/io/util.py\n@@ -1,4 +1,4 @@\n-from urllib.request import urlopen\n+import urllib.parse\n \n import os\n import re\n@@ -19,10 +19,11 @@\n def file_or_url_context(resource_name):\n \"\"\"Yield name of file from the given resource (i.e. file or url).\"\"\"\n if is_url(resource_name):\n- _, ext = os.path.splitext(resource_name)\n+ url_components = urllib.parse.urlparse(resource_name)\n+ _, ext = os.path.splitext(url_components.path)\n try:\n with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f:\n- u = urlopen(resource_name)\n+ u = urllib.request.urlopen(resource_name)\n f.write(u.read())\n # f must be closed before yielding\n yield f.name\n", "issue": "\"[Errno 36] File name too long:\" when using imread on remote resource with long querystring\n## Description\r\nWhen using skimage.io.imread with a remote resource, a long query string on the remote resource will cause a failure to read the remote resource, because the temporary file cannot be created.\r\n\r\ne.g. \r\n\r\nThe following works fine\r\n```\r\n>>> im = imread('https://c1.staticflickr.com/9/8370/8429454143_1066b73c04_o.jpg?{}'.format(''.join(['s' for i in range(100)])))\r\n\r\n```\r\n\r\nwhile the one below fails\r\n\r\n```\r\n>>> im = imread('https://c1.staticflickr.com/9/8370/8429454143_1066b73c04_o.jpg?{}'.format(''.join(['s' for i in range(300)])))\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.5/dist-packages/skimage/io/util.py\", line 28, in file_or_url_context\r\n with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f:\r\n File \"/usr/lib/python3.5/tempfile.py\", line 688, in NamedTemporaryFile\r\n (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)\r\n File \"/usr/lib/python3.5/tempfile.py\", line 399, in _mkstemp_inner\r\n fd = _os.open(file, flags, 0o600)\r\nOSError: [Errno 36] File name too long: '/tmp/tmpmfnujlq6.jpg?ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/usr/local/lib/python3.5/dist-packages/skimage/io/_io.py\", line 61, in imread\r\n with file_or_url_context(fname) as fname:\r\n File \"/usr/lib/python3.5/contextlib.py\", line 59, in __enter__\r\n return next(self.gen)\r\n File \"/usr/local/lib/python3.5/dist-packages/skimage/io/util.py\", line 34, in file_or_url_context\r\n os.remove(f.name)\r\nUnboundLocalError: local variable 'f' referenced before assignment\r\n\r\n```\r\n\r\n## Way to reproduce\r\n[If reporting a bug, please include the following important information:]\r\n- [x] Code example\r\n- [x] Operating system and version\r\n```\r\nDISTRIB_ID=Ubuntu\r\nDISTRIB_RELEASE=16.04\r\nDISTRIB_CODENAME=xenial\r\nDISTRIB_DESCRIPTION=\"Ubuntu 16.04.5 LTS\"\r\n```\r\n- [x] Python version: Python 3.5.2\r\n- [x] scikit-image version (run `skimage.__version__`): skimage.__version__\r\n'0.14.1'\r\n\r\n## Proposal\r\n\r\nhttps://github.com/scikit-image/scikit-image/blob/d24634d86e4f90fe96377209d66ed114b9b601e4/skimage/io/util.py#L22\r\n\r\nThe querystring should be excluded from the temporary file name extension\n", 
"before_files": [{"content": "from urllib.request import urlopen\n\nimport os\nimport re\nimport tempfile\nfrom contextlib import contextmanager\n\n\nURL_REGEX = re.compile(r'http://|https://|ftp://|file://|file:\\\\')\n\n\ndef is_url(filename):\n \"\"\"Return True if string is an http or ftp path.\"\"\"\n return (isinstance(filename, str) and\n URL_REGEX.match(filename) is not None)\n\n\n@contextmanager\ndef file_or_url_context(resource_name):\n \"\"\"Yield name of file from the given resource (i.e. file or url).\"\"\"\n if is_url(resource_name):\n _, ext = os.path.splitext(resource_name)\n try:\n with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f:\n u = urlopen(resource_name)\n f.write(u.read())\n # f must be closed before yielding\n yield f.name\n finally:\n os.remove(f.name)\n else:\n yield resource_name\n", "path": "skimage/io/util.py"}]} | 1,678 | 206 |
gh_patches_debug_25033 | rasdani/github-patches | git_diff | apache__airflow-6783 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[AIRFLOW-3014] Fix multiple alembic heads
Make sure you have checked _all_ steps below.
### Jira
- [ ] My PR addresses the following [Airflow Jira](https://issues.apache.org/jira/browse/AIRFLOW/) issues and references them in the PR title. For example, "\[AIRFLOW-XXX\] My Airflow PR"
- https://issues.apache.org/jira/browse/AIRFLOW-6224
- In case you are fixing a typo in the documentation you can prepend your commit with \[AIRFLOW-XXX\], code changes always need a Jira issue.
- In case you are proposing a fundamental code change, you need to create an Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)).
- In case you are adding a dependency, check if the license complies with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).
### Description
- [ ] Here are some details about my PR, including screenshots of any UI changes:
### Tests
- [ ] My PR adds the following unit tests __OR__ does not need testing for this extremely good reason:
### Commits
- [ ] My commits all reference Jira issues in their subject lines, and I have squashed multiple commits if they address the same issue. In addition, my commits follow the guidelines from "[How to write a good git commit message](http://chris.beams.io/posts/git-commit/)":
1. Subject is separated from body by a blank line
1. Subject is limited to 50 characters (not including Jira issue reference)
1. Subject does not end with a period
1. Subject uses the imperative mood ("add", not "adding")
1. Body wraps at 72 characters
1. Body explains "what" and "why", not "how"
### Documentation
- [ ] In case of new functionality, my PR adds documentation that describes how to use it.
- All the public functions and the classes in the PR contain docstrings that explain what it does
  - If you implement backwards incompatible changes, please leave a note in the [Updating.md](https://github.com/apache/airflow/blob/master/UPDATING.md) so we can assign it to an appropriate release
</issue>
<code>
[start of airflow/migrations/versions/c1840b4bcf1a_increase_length_of_password_column_in_.py]
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18
19 """Increase length of password column in connection table
20
21 Revision ID: c1840b4bcf1a
22 Revises: 004c1210f153
23 Create Date: 2019-10-02 16:56:54.865550
24
25 """
26
27 import sqlalchemy as sa
28 from alembic import op
29
30 # revision identifiers, used by Alembic.
31 revision = 'c1840b4bcf1a'
32 down_revision = '004c1210f153'
33 branch_labels = None
34 depends_on = None
35
36
37 def upgrade():
38 conn = op.get_bind()
39 if conn.dialect.name == 'sqlite':
40 # SQLite does not allow column modifications so we need to skip this migration
41 return
42
43 op.alter_column(table_name='connection',
44 column_name='password',
45 type_=sa.String(length=5000))
46
47
48 def downgrade():
49 # Can't be undone
50 pass
51
[end of airflow/migrations/versions/c1840b4bcf1a_increase_length_of_password_column_in_.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/airflow/migrations/versions/c1840b4bcf1a_increase_length_of_password_column_in_.py b/airflow/migrations/versions/c1840b4bcf1a_increase_length_of_password_column_in_.py
deleted file mode 100644
--- a/airflow/migrations/versions/c1840b4bcf1a_increase_length_of_password_column_in_.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-"""Increase length of password column in connection table
-
-Revision ID: c1840b4bcf1a
-Revises: 004c1210f153
-Create Date: 2019-10-02 16:56:54.865550
-
-"""
-
-import sqlalchemy as sa
-from alembic import op
-
-# revision identifiers, used by Alembic.
-revision = 'c1840b4bcf1a'
-down_revision = '004c1210f153'
-branch_labels = None
-depends_on = None
-
-
-def upgrade():
- conn = op.get_bind()
- if conn.dialect.name == 'sqlite':
- # SQLite does not allow column modifications so we need to skip this migration
- return
-
- op.alter_column(table_name='connection',
- column_name='password',
- type_=sa.String(length=5000))
-
-
-def downgrade():
- # Can't be undone
- pass
| {"golden_diff": "diff --git a/airflow/migrations/versions/c1840b4bcf1a_increase_length_of_password_column_in_.py b/airflow/migrations/versions/c1840b4bcf1a_increase_length_of_password_column_in_.py\ndeleted file mode 100644\n--- a/airflow/migrations/versions/c1840b4bcf1a_increase_length_of_password_column_in_.py\n+++ /dev/null\n@@ -1,50 +0,0 @@\n-#\n-# Licensed to the Apache Software Foundation (ASF) under one\n-# or more contributor license agreements. See the NOTICE file\n-# distributed with this work for additional information\n-# regarding copyright ownership. The ASF licenses this file\n-# to you under the Apache License, Version 2.0 (the\n-# \"License\"); you may not use this file except in compliance\n-# with the License. You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing,\n-# software distributed under the License is distributed on an\n-# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n-# KIND, either express or implied. See the License for the\n-# specific language governing permissions and limitations\n-# under the License.\n-\n-\"\"\"Increase length of password column in connection table\n-\n-Revision ID: c1840b4bcf1a\n-Revises: 004c1210f153\n-Create Date: 2019-10-02 16:56:54.865550\n-\n-\"\"\"\n-\n-import sqlalchemy as sa\n-from alembic import op\n-\n-# revision identifiers, used by Alembic.\n-revision = 'c1840b4bcf1a'\n-down_revision = '004c1210f153'\n-branch_labels = None\n-depends_on = None\n-\n-\n-def upgrade():\n- conn = op.get_bind()\n- if conn.dialect.name == 'sqlite':\n- # SQLite does not allow column modifications so we need to skip this migration\n- return\n-\n- op.alter_column(table_name='connection',\n- column_name='password',\n- type_=sa.String(length=5000))\n-\n-\n-def downgrade():\n- # Can't be undone\n- pass\n", "issue": "[AIRFLOW-3014] Fix multiple alembic heads\nMake sure you have checked _all_ steps below.\r\n\r\n### Jira\r\n\r\n- [ ] My PR addresses the following [Airflow Jira](https://issues.apache.org/jira/browse/AIRFLOW/) issues and references them in the PR title. For example, \"\\[AIRFLOW-XXX\\] My Airflow PR\"\r\n - https://issues.apache.org/jira/browse/AIRFLOW-6224\r\n - In case you are fixing a typo in the documentation you can prepend your commit with \\[AIRFLOW-XXX\\], code changes always need a Jira issue.\r\n - In case you are proposing a fundamental code change, you need to create an Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)).\r\n - In case you are adding a dependency, check if the license complies with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).\r\n\r\n### Description\r\n\r\n- [ ] Here are some details about my PR, including screenshots of any UI changes:\r\n\r\n### Tests\r\n\r\n- [ ] My PR adds the following unit tests __OR__ does not need testing for this extremely good reason:\r\n\r\n### Commits\r\n\r\n- [ ] My commits all reference Jira issues in their subject lines, and I have squashed multiple commits if they address the same issue. In addition, my commits follow the guidelines from \"[How to write a good git commit message](http://chris.beams.io/posts/git-commit/)\":\r\n 1. Subject is separated from body by a blank line\r\n 1. Subject is limited to 50 characters (not including Jira issue reference)\r\n 1. Subject does not end with a period\r\n 1. 
Subject uses the imperative mood (\"add\", not \"adding\")\r\n 1. Body wraps at 72 characters\r\n 1. Body explains \"what\" and \"why\", not \"how\"\r\n\r\n### Documentation\r\n\r\n- [ ] In case of new functionality, my PR adds documentation that describes how to use it.\r\n - All the public functions and the classes in the PR contain docstrings that explain what it does\r\n - If you implement backwards incompatible changes, please leave a note in the [Updating.md](https://github.com/apache/airflow/blob/master/UPDATING.md) so we can assign it to a appropriate release\r\n\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Increase length of password column in connection table\n\nRevision ID: c1840b4bcf1a\nRevises: 004c1210f153\nCreate Date: 2019-10-02 16:56:54.865550\n\n\"\"\"\n\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = 'c1840b4bcf1a'\ndown_revision = '004c1210f153'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n conn = op.get_bind()\n if conn.dialect.name == 'sqlite':\n # SQLite does not allow column modifications so we need to skip this migration\n return\n\n op.alter_column(table_name='connection',\n column_name='password',\n type_=sa.String(length=5000))\n\n\ndef downgrade():\n # Can't be undone\n pass\n", "path": "airflow/migrations/versions/c1840b4bcf1a_increase_length_of_password_column_in_.py"}]} | 1,587 | 553 |
gh_patches_debug_67223 | rasdani/github-patches | git_diff | svthalia__concrexit-1867 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix payable registry cache using old data
### Describe the bug
Payments are a mess. For example, if you pay for an event registration, delete the payment (through the admin or api), creating a new TPay payment through the api fails with 409 Conflict, there is still a payment in the registration model, but not in the payments api. Furthermore, paying with a different payment type works, but those payments can then not be removed. Also see #1806. I suspect there are many more related problems.
### How to reproduce
Play around with payable things, deleting and recreating them, or deleting and recreating payments.
### Expected behaviour
When a payable is not paid and should be payable with TPay, paying does not fail. Deleting a payment makes the payable not-paid as it was before creating the payment. Deleting or changing a payable is either impossible, or also deletes a payment that belongs to it.
### Additional context
I think it would be a good idea to combine this with #1000. Some test-driven development would make sense for payments, and I think the expected behaviour should be well-testable. Of course the problems may not be entirely within the payments app, but also in the payables defined by other apps.
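For background, the suspected mechanism is the `lru_cache` on the payable registry (see `payments/payables.py` below), which keeps handing out the wrapper that was built the first time a model was looked up. A simplified sketch of that hazard, with illustrative names rather than the project's real classes:
```python
from functools import lru_cache

class PayableWrapper:
    """Simplified stand-in for the real Payable wrapper."""
    def __init__(self, model):
        self.model = model  # state as it was when the wrapper was created

@lru_cache(maxsize=None)
def get_payable(model):
    # Django models hash and compare by primary key, so a freshly fetched
    # instance of the same row is a cache hit: callers get the wrapper built
    # around the *old* instance, even if its payment was deleted since.
    return PayableWrapper(model)
```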
</issue>
<code>
[start of website/payments/payables.py]
1 from functools import lru_cache
2
3 from django.db.models import Model
4
5 _registry = {}
6
7
8 class NotRegistered(Exception):
9 pass
10
11
12 class Payable:
13 def __init__(self, model: Model):
14 self.model = model
15
16 @property
17 def pk(self):
18 return self.model.pk
19
20 @property
21 def payment(self):
22 return self.model.payment
23
24 @payment.setter
25 def payment(self, payment):
26 self.model.payment = payment
27
28 @property
29 def payment_amount(self):
30 raise NotImplementedError
31
32 @property
33 def payment_topic(self):
34 raise NotImplementedError
35
36 @property
37 def payment_notes(self):
38 raise NotImplementedError
39
40 @property
41 def payment_payer(self):
42 raise NotImplementedError
43
44 @property
45 def tpay_allowed(self):
46 return True
47
48 def can_manage_payment(self, member):
49 raise NotImplementedError
50
51
52 class Payables:
53 _registry = {}
54
55 @lru_cache(maxsize=None)
56 def _get_key(self, model):
57 return f"{model._meta.app_label}_{model._meta.model_name}"
58
59 @lru_cache(maxsize=None)
60 def get_payable(self, model: Model) -> Payable:
61 if self._get_key(model) not in self._registry:
62 raise NotRegistered(f"No Payable registered for {self._get_key(model)}")
63 return self._registry[self._get_key(model)](model)
64
65 def register(self, model: Model, payable_class: Payable):
66 self._registry[self._get_key(model)] = payable_class
67
68
69 payables = Payables()
70
[end of website/payments/payables.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/payments/payables.py b/website/payments/payables.py
--- a/website/payments/payables.py
+++ b/website/payments/payables.py
@@ -56,7 +56,6 @@
def _get_key(self, model):
return f"{model._meta.app_label}_{model._meta.model_name}"
- @lru_cache(maxsize=None)
def get_payable(self, model: Model) -> Payable:
if self._get_key(model) not in self._registry:
raise NotRegistered(f"No Payable registered for {self._get_key(model)}")
| {"golden_diff": "diff --git a/website/payments/payables.py b/website/payments/payables.py\n--- a/website/payments/payables.py\n+++ b/website/payments/payables.py\n@@ -56,7 +56,6 @@\n def _get_key(self, model):\n return f\"{model._meta.app_label}_{model._meta.model_name}\"\n \n- @lru_cache(maxsize=None)\n def get_payable(self, model: Model) -> Payable:\n if self._get_key(model) not in self._registry:\n raise NotRegistered(f\"No Payable registered for {self._get_key(model)}\")\n", "issue": "Fix payable registry cache using old data\n### Describe the bug\r\nPayments are a mess. For example, if you pay for an event registration, delete the payment (through the admin or api), creating a new TPay payment through the api fails with 409 Conflict, there is still a payment in the registration model, but not in the payments api. Furthermore, paying with a different payment type works, but those payments can then not be removed. Also see #1806. I suspect there are many more related problems.\r\n\r\n### How to reproduce\r\nPlay around with payable things, deleting and recreating them, or deleting and recreating payments.\r\n\r\n### Expected behaviour\r\nWhen a payable is not paid and should be payable with TPay, paying does not fail. Deleting a payment makes the payable not-paid as it was before creating the payment. Deleting or changing a payable is either impossible, or also deletes a payment that belongs to it.\r\n\r\n### Additional context\r\nI think it would be a good idea to combine this with #1000. Some test-driven development would make sense for payments, and I think the expected behaviour should be well-testable. Of course the problems may not be entirely within the payments app, but also in the payables defined by other apps.\r\n\n", "before_files": [{"content": "from functools import lru_cache\n\nfrom django.db.models import Model\n\n_registry = {}\n\n\nclass NotRegistered(Exception):\n pass\n\n\nclass Payable:\n def __init__(self, model: Model):\n self.model = model\n\n @property\n def pk(self):\n return self.model.pk\n\n @property\n def payment(self):\n return self.model.payment\n\n @payment.setter\n def payment(self, payment):\n self.model.payment = payment\n\n @property\n def payment_amount(self):\n raise NotImplementedError\n\n @property\n def payment_topic(self):\n raise NotImplementedError\n\n @property\n def payment_notes(self):\n raise NotImplementedError\n\n @property\n def payment_payer(self):\n raise NotImplementedError\n\n @property\n def tpay_allowed(self):\n return True\n\n def can_manage_payment(self, member):\n raise NotImplementedError\n\n\nclass Payables:\n _registry = {}\n\n @lru_cache(maxsize=None)\n def _get_key(self, model):\n return f\"{model._meta.app_label}_{model._meta.model_name}\"\n\n @lru_cache(maxsize=None)\n def get_payable(self, model: Model) -> Payable:\n if self._get_key(model) not in self._registry:\n raise NotRegistered(f\"No Payable registered for {self._get_key(model)}\")\n return self._registry[self._get_key(model)](model)\n\n def register(self, model: Model, payable_class: Payable):\n self._registry[self._get_key(model)] = payable_class\n\n\npayables = Payables()\n", "path": "website/payments/payables.py"}]} | 1,280 | 135 |
gh_patches_debug_29304 | rasdani/github-patches | git_diff | cal-itp__benefits-755 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ensure relevant analytics events are fired for Login.gov IAL2 flow
Design will collaborate with Dev to ensure the behaviors they are interested in understanding are defined in Amplitude.
- [x] `cancel_sign_in`
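A rough sketch of what that could look like, following the pattern of the existing events in `benefits/oauth/analytics.py` shown below; the class name and the event string are assumptions until the Amplitude naming is confirmed:
```python
# Assumes the module's existing OAuthEvent base class and `core` helpers.
class CanceledSignInEvent(OAuthEvent):
    """Analytics event representing the user canceling OAuth sign in."""

    def __init__(self, request):
        super().__init__(request, "canceled sign in")  # placeholder event name


def canceled_sign_in(request):
    """Send the "canceled sign in" analytics event."""
    core.send_event(CanceledSignInEvent(request))
```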
</issue>
<code>
[start of benefits/oauth/analytics.py]
1 """
2 The oauth application: analytics implementation.
3 """
4 from benefits.core import analytics as core, session
5
6
7 class OAuthEvent(core.Event):
8 """Base OAuth analytics event."""
9
10 def __init__(self, request, event_type):
11 super().__init__(request, event_type)
12 verifier = session.verifier(request)
13 self.update_event_properties(auth_provider=verifier.auth_provider.client_name)
14
15
16 class StartedSignInEvent(OAuthEvent):
17 """Analytics event representing the beginning of the OAuth sign in flow."""
18
19 def __init__(self, request):
20 super().__init__(request, "started sign in")
21
22
23 class FinishedSignInEvent(OAuthEvent):
24 """Analytics event representing the end of the OAuth sign in flow."""
25
26 def __init__(self, request):
27 super().__init__(request, "finished sign in")
28
29
30 class StartedSignOutEvent(OAuthEvent):
31 """Analytics event representing the beginning of application sign out."""
32
33 def __init__(self, request):
34 super().__init__(request, "started signed out")
35
36
37 class FinishedSignOutEvent(OAuthEvent):
38 """Analytics event representing the end of application sign out."""
39
40 def __init__(self, request):
41 super().__init__(request, "finished sign out")
42 self.update_event_properties(origin=session.origin(request))
43
44
45 def started_sign_in(request):
46 """Send the "started sign in" analytics event."""
47 core.send_event(StartedSignInEvent(request))
48
49
50 def finished_sign_in(request):
51 """Send the "finished sign in" analytics event."""
52 core.send_event(FinishedSignInEvent(request))
53
54
55 def started_sign_out(request):
56 """Send the "started signed out" analytics event."""
57 core.send_event(StartedSignOutEvent(request))
58
59
60 def finished_sign_out(request):
61 """Send the "finished sign out" analytics event."""
62 core.send_event(FinishedSignOutEvent(request))
63
[end of benefits/oauth/analytics.py]
[start of benefits/oauth/views.py]
1 import logging
2
3 from django.shortcuts import redirect
4 from django.urls import reverse
5 from django.utils.decorators import decorator_from_middleware
6
7 from benefits.core import session
8 from benefits.core.middleware import VerifierSessionRequired
9 from . import analytics, redirects
10 from .client import oauth
11
12
13 logger = logging.getLogger(__name__)
14
15
16 ROUTE_AUTH = "oauth:authorize"
17 ROUTE_START = "eligibility:start"
18 ROUTE_CONFIRM = "eligibility:confirm"
19 ROUTE_UNVERIFIED = "eligibility:unverified"
20 ROUTE_POST_LOGOUT = "oauth:post_logout"
21
22
23 @decorator_from_middleware(VerifierSessionRequired)
24 def login(request):
25 """View implementing OIDC authorize_redirect."""
26 verifier = session.verifier(request)
27 oauth_client = oauth.create_client(verifier.auth_provider.client_name)
28
29 if not oauth_client:
30 raise Exception(f"oauth_client not registered: {verifier.auth_provider.client_name}")
31
32 route = reverse(ROUTE_AUTH)
33 redirect_uri = redirects.generate_redirect_uri(request, route)
34
35 logger.debug(f"OAuth authorize_redirect with redirect_uri: {redirect_uri}")
36
37 analytics.started_sign_in(request)
38
39 return oauth_client.authorize_redirect(request, redirect_uri)
40
41
42 @decorator_from_middleware(VerifierSessionRequired)
43 def authorize(request):
44 """View implementing OIDC token authorization."""
45 verifier = session.verifier(request)
46 oauth_client = oauth.create_client(verifier.auth_provider.client_name)
47
48 if not oauth_client:
49 raise Exception(f"oauth_client not registered: {verifier.auth_provider.client_name}")
50
51 logger.debug("Attempting to authorize OAuth access token")
52 token = oauth_client.authorize_access_token(request)
53
54 if token is None:
55 logger.warning("Could not authorize OAuth access token")
56 return redirect(ROUTE_START)
57
58 logger.debug("OAuth access token authorized")
59
60 # We store the id_token in the user's session. This is the minimal amount of information needed later to log the user out.
61 id_token = token["id_token"]
62
63 # We store the returned claim in case it can be used later in eligibility verification.
64 verifier_claim = verifier.auth_provider.claim
65 stored_claim = None
66
67 if verifier_claim:
68 userinfo = token.get("userinfo")
69 # the claim comes back in userinfo like { "claim": "True" | "False" }
70 claim_flag = (userinfo.get(verifier_claim) if userinfo else "false").lower() == "true"
71 # if userinfo contains our claim and the flag is true, store the *claim*
72 stored_claim = verifier_claim if claim_flag else None
73
74 session.update(request, oauth_token=id_token, oauth_claim=stored_claim)
75
76 analytics.finished_sign_in(request)
77
78 return redirect(ROUTE_CONFIRM)
79
80
81 def cancel(request):
82 """View implementing cancellation of OIDC authorization."""
83 return redirect(ROUTE_UNVERIFIED)
84
85
86 @decorator_from_middleware(VerifierSessionRequired)
87 def logout(request):
88 """View implementing OIDC and application sign out."""
89 verifier = session.verifier(request)
90 oauth_client = oauth.create_client(verifier.auth_provider.client_name)
91
92 if not oauth_client:
93 raise Exception(f"oauth_client not registered: {verifier.auth_provider.client_name}")
94
95 analytics.started_sign_out(request)
96
97 # overwrite the oauth session token, the user is signed out of the app
98 token = session.oauth_token(request)
99 session.logout(request)
100
101 route = reverse(ROUTE_POST_LOGOUT)
102 redirect_uri = redirects.generate_redirect_uri(request, route)
103
104 logger.debug(f"OAuth end_session_endpoint with redirect_uri: {redirect_uri}")
105
106 # send the user through the end_session_endpoint, redirecting back to
107 # the post_logout route
108 return redirects.deauthorize_redirect(oauth_client, token, redirect_uri)
109
110
111 def post_logout(request):
112 """View routes the user to their origin after sign out."""
113
114 analytics.finished_sign_out(request)
115
116 origin = session.origin(request)
117 return redirect(origin)
118
[end of benefits/oauth/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/benefits/oauth/analytics.py b/benefits/oauth/analytics.py
--- a/benefits/oauth/analytics.py
+++ b/benefits/oauth/analytics.py
@@ -20,6 +20,13 @@
super().__init__(request, "started sign in")
+class CanceledSignInEvent(OAuthEvent):
+ """Analytics event representing the canceling of application sign in."""
+
+ def __init__(self, request):
+ super().__init__(request, "canceled sign in")
+
+
class FinishedSignInEvent(OAuthEvent):
"""Analytics event representing the end of the OAuth sign in flow."""
@@ -31,7 +38,7 @@
"""Analytics event representing the beginning of application sign out."""
def __init__(self, request):
- super().__init__(request, "started signed out")
+ super().__init__(request, "started sign out")
class FinishedSignOutEvent(OAuthEvent):
@@ -47,6 +54,11 @@
core.send_event(StartedSignInEvent(request))
+def canceled_sign_in(request):
+ """Send the "canceled sign in" analytics event."""
+ core.send_event(CanceledSignInEvent(request))
+
+
def finished_sign_in(request):
"""Send the "finished sign in" analytics event."""
core.send_event(FinishedSignInEvent(request))
diff --git a/benefits/oauth/views.py b/benefits/oauth/views.py
--- a/benefits/oauth/views.py
+++ b/benefits/oauth/views.py
@@ -80,6 +80,9 @@
def cancel(request):
"""View implementing cancellation of OIDC authorization."""
+
+ analytics.canceled_sign_in(request)
+
return redirect(ROUTE_UNVERIFIED)
| {"golden_diff": "diff --git a/benefits/oauth/analytics.py b/benefits/oauth/analytics.py\n--- a/benefits/oauth/analytics.py\n+++ b/benefits/oauth/analytics.py\n@@ -20,6 +20,13 @@\n super().__init__(request, \"started sign in\")\n \n \n+class CanceledSignInEvent(OAuthEvent):\n+ \"\"\"Analytics event representing the canceling of application sign in.\"\"\"\n+\n+ def __init__(self, request):\n+ super().__init__(request, \"canceled sign in\")\n+\n+\n class FinishedSignInEvent(OAuthEvent):\n \"\"\"Analytics event representing the end of the OAuth sign in flow.\"\"\"\n \n@@ -31,7 +38,7 @@\n \"\"\"Analytics event representing the beginning of application sign out.\"\"\"\n \n def __init__(self, request):\n- super().__init__(request, \"started signed out\")\n+ super().__init__(request, \"started sign out\")\n \n \n class FinishedSignOutEvent(OAuthEvent):\n@@ -47,6 +54,11 @@\n core.send_event(StartedSignInEvent(request))\n \n \n+def canceled_sign_in(request):\n+ \"\"\"Send the \"canceled sign in\" analytics event.\"\"\"\n+ core.send_event(CanceledSignInEvent(request))\n+\n+\n def finished_sign_in(request):\n \"\"\"Send the \"finished sign in\" analytics event.\"\"\"\n core.send_event(FinishedSignInEvent(request))\ndiff --git a/benefits/oauth/views.py b/benefits/oauth/views.py\n--- a/benefits/oauth/views.py\n+++ b/benefits/oauth/views.py\n@@ -80,6 +80,9 @@\n \n def cancel(request):\n \"\"\"View implementing cancellation of OIDC authorization.\"\"\"\n+\n+ analytics.canceled_sign_in(request)\n+\n return redirect(ROUTE_UNVERIFIED)\n", "issue": "Ensure relevant analytics events are fired for Login.gov IAL2 flow\nDesign to collaborate with Dev to ensure the behaviors they are interested in understanding are defined in Amplitude. \r\n\r\n- [x] `cancel_sign_in`\n", "before_files": [{"content": "\"\"\"\nThe oauth application: analytics implementation.\n\"\"\"\nfrom benefits.core import analytics as core, session\n\n\nclass OAuthEvent(core.Event):\n \"\"\"Base OAuth analytics event.\"\"\"\n\n def __init__(self, request, event_type):\n super().__init__(request, event_type)\n verifier = session.verifier(request)\n self.update_event_properties(auth_provider=verifier.auth_provider.client_name)\n\n\nclass StartedSignInEvent(OAuthEvent):\n \"\"\"Analytics event representing the beginning of the OAuth sign in flow.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"started sign in\")\n\n\nclass FinishedSignInEvent(OAuthEvent):\n \"\"\"Analytics event representing the end of the OAuth sign in flow.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"finished sign in\")\n\n\nclass StartedSignOutEvent(OAuthEvent):\n \"\"\"Analytics event representing the beginning of application sign out.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"started signed out\")\n\n\nclass FinishedSignOutEvent(OAuthEvent):\n \"\"\"Analytics event representing the end of application sign out.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"finished sign out\")\n self.update_event_properties(origin=session.origin(request))\n\n\ndef started_sign_in(request):\n \"\"\"Send the \"started sign in\" analytics event.\"\"\"\n core.send_event(StartedSignInEvent(request))\n\n\ndef finished_sign_in(request):\n \"\"\"Send the \"finished sign in\" analytics event.\"\"\"\n core.send_event(FinishedSignInEvent(request))\n\n\ndef started_sign_out(request):\n \"\"\"Send the \"started signed out\" analytics event.\"\"\"\n core.send_event(StartedSignOutEvent(request))\n\n\ndef 
finished_sign_out(request):\n \"\"\"Send the \"finished sign out\" analytics event.\"\"\"\n core.send_event(FinishedSignOutEvent(request))\n", "path": "benefits/oauth/analytics.py"}, {"content": "import logging\n\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.utils.decorators import decorator_from_middleware\n\nfrom benefits.core import session\nfrom benefits.core.middleware import VerifierSessionRequired\nfrom . import analytics, redirects\nfrom .client import oauth\n\n\nlogger = logging.getLogger(__name__)\n\n\nROUTE_AUTH = \"oauth:authorize\"\nROUTE_START = \"eligibility:start\"\nROUTE_CONFIRM = \"eligibility:confirm\"\nROUTE_UNVERIFIED = \"eligibility:unverified\"\nROUTE_POST_LOGOUT = \"oauth:post_logout\"\n\n\n@decorator_from_middleware(VerifierSessionRequired)\ndef login(request):\n \"\"\"View implementing OIDC authorize_redirect.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n route = reverse(ROUTE_AUTH)\n redirect_uri = redirects.generate_redirect_uri(request, route)\n\n logger.debug(f\"OAuth authorize_redirect with redirect_uri: {redirect_uri}\")\n\n analytics.started_sign_in(request)\n\n return oauth_client.authorize_redirect(request, redirect_uri)\n\n\n@decorator_from_middleware(VerifierSessionRequired)\ndef authorize(request):\n \"\"\"View implementing OIDC token authorization.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n logger.debug(\"Attempting to authorize OAuth access token\")\n token = oauth_client.authorize_access_token(request)\n\n if token is None:\n logger.warning(\"Could not authorize OAuth access token\")\n return redirect(ROUTE_START)\n\n logger.debug(\"OAuth access token authorized\")\n\n # We store the id_token in the user's session. 
This is the minimal amount of information needed later to log the user out.\n id_token = token[\"id_token\"]\n\n # We store the returned claim in case it can be used later in eligibility verification.\n verifier_claim = verifier.auth_provider.claim\n stored_claim = None\n\n if verifier_claim:\n userinfo = token.get(\"userinfo\")\n # the claim comes back in userinfo like { \"claim\": \"True\" | \"False\" }\n claim_flag = (userinfo.get(verifier_claim) if userinfo else \"false\").lower() == \"true\"\n # if userinfo contains our claim and the flag is true, store the *claim*\n stored_claim = verifier_claim if claim_flag else None\n\n session.update(request, oauth_token=id_token, oauth_claim=stored_claim)\n\n analytics.finished_sign_in(request)\n\n return redirect(ROUTE_CONFIRM)\n\n\ndef cancel(request):\n \"\"\"View implementing cancellation of OIDC authorization.\"\"\"\n return redirect(ROUTE_UNVERIFIED)\n\n\n@decorator_from_middleware(VerifierSessionRequired)\ndef logout(request):\n \"\"\"View implementing OIDC and application sign out.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n analytics.started_sign_out(request)\n\n # overwrite the oauth session token, the user is signed out of the app\n token = session.oauth_token(request)\n session.logout(request)\n\n route = reverse(ROUTE_POST_LOGOUT)\n redirect_uri = redirects.generate_redirect_uri(request, route)\n\n logger.debug(f\"OAuth end_session_endpoint with redirect_uri: {redirect_uri}\")\n\n # send the user through the end_session_endpoint, redirecting back to\n # the post_logout route\n return redirects.deauthorize_redirect(oauth_client, token, redirect_uri)\n\n\ndef post_logout(request):\n \"\"\"View routes the user to their origin after sign out.\"\"\"\n\n analytics.finished_sign_out(request)\n\n origin = session.origin(request)\n return redirect(origin)\n", "path": "benefits/oauth/views.py"}]} | 2,188 | 379 |
gh_patches_debug_37702 | rasdani/github-patches | git_diff | Textualize__textual-2605 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a `description` parameter to the work decorator, to use in place of the auto-generated description.
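A hedged sketch of the intended usage; the parameter does not exist yet, and the app and method names below are made up:
```python
from textual import work
from textual.app import App


class ReportApp(App):
    @work(exclusive=True, description="Downloading the report")
    async def download_report(self) -> None:
        # With the proposed parameter, the worker would be described as
        # "Downloading the report" instead of the auto-generated
        # "download_report()" string.
        ...
```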
</issue>
<code>
[start of src/textual/_work_decorator.py]
1 """
2
3 A decorator used to create [workers](/guide/workers).
4 """
5
6
7 from __future__ import annotations
8
9 from functools import partial, wraps
10 from typing import TYPE_CHECKING, Callable, Coroutine, TypeVar, Union, cast, overload
11
12 from typing_extensions import ParamSpec, TypeAlias
13
14 if TYPE_CHECKING:
15 from .worker import Worker
16
17
18 FactoryParamSpec = ParamSpec("FactoryParamSpec")
19 DecoratorParamSpec = ParamSpec("DecoratorParamSpec")
20 ReturnType = TypeVar("ReturnType")
21
22 Decorator: TypeAlias = Callable[
23 [
24 Union[
25 Callable[DecoratorParamSpec, ReturnType],
26 Callable[DecoratorParamSpec, Coroutine[None, None, ReturnType]],
27 ]
28 ],
29 Callable[DecoratorParamSpec, "Worker[ReturnType]"],
30 ]
31
32
33 @overload
34 def work(
35 method: Callable[FactoryParamSpec, Coroutine[None, None, ReturnType]]
36 ) -> Callable[FactoryParamSpec, "Worker[ReturnType]"]:
37 ...
38
39
40 @overload
41 def work(
42 method: Callable[FactoryParamSpec, ReturnType]
43 ) -> Callable[FactoryParamSpec, "Worker[ReturnType]"]:
44 ...
45
46
47 @overload
48 def work(*, exclusive: bool = False) -> Decorator[..., ReturnType]:
49 ...
50
51
52 def work(
53 method: Callable[FactoryParamSpec, ReturnType]
54 | Callable[FactoryParamSpec, Coroutine[None, None, ReturnType]]
55 | None = None,
56 *,
57 name: str = "",
58 group: str = "default",
59 exit_on_error: bool = True,
60 exclusive: bool = False,
61 ) -> Callable[FactoryParamSpec, Worker[ReturnType]] | Decorator:
62 """A decorator used to create [workers](/guide/workers).
63
64 Args:
65 method: A function or coroutine.
66 name: A short string to identify the worker (in logs and debugging).
67 group: A short string to identify a group of workers.
68 exit_on_error: Exit the app if the worker raises an error. Set to `False` to suppress exceptions.
69 exclusive: Cancel all workers in the same group.
70 """
71
72 def decorator(
73 method: (
74 Callable[DecoratorParamSpec, ReturnType]
75 | Callable[DecoratorParamSpec, Coroutine[None, None, ReturnType]]
76 )
77 ) -> Callable[DecoratorParamSpec, Worker[ReturnType]]:
78 """The decorator."""
79
80 @wraps(method)
81 def decorated(
82 *args: DecoratorParamSpec.args, **kwargs: DecoratorParamSpec.kwargs
83 ) -> Worker[ReturnType]:
84 """The replaced callable."""
85 from .dom import DOMNode
86
87 self = args[0]
88 assert isinstance(self, DOMNode)
89
90 try:
91 positional_arguments = ", ".join(repr(arg) for arg in args[1:])
92 keyword_arguments = ", ".join(
93 f"{name}={value!r}" for name, value in kwargs.items()
94 )
95 tokens = [positional_arguments, keyword_arguments]
96 worker_description = f"{method.__name__}({', '.join(token for token in tokens if token)})"
97 except Exception:
98 worker_description = "<worker>"
99 worker = cast(
100 "Worker[ReturnType]",
101 self.run_worker(
102 partial(method, *args, **kwargs),
103 name=name or method.__name__,
104 group=group,
105 description=worker_description,
106 exclusive=exclusive,
107 exit_on_error=exit_on_error,
108 ),
109 )
110 return worker
111
112 return decorated
113
114 if method is None:
115 return decorator
116 else:
117 return decorator(method)
118
[end of src/textual/_work_decorator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/textual/_work_decorator.py b/src/textual/_work_decorator.py
--- a/src/textual/_work_decorator.py
+++ b/src/textual/_work_decorator.py
@@ -58,6 +58,7 @@
group: str = "default",
exit_on_error: bool = True,
exclusive: bool = False,
+ description: str | None = None,
) -> Callable[FactoryParamSpec, Worker[ReturnType]] | Decorator:
"""A decorator used to create [workers](/guide/workers).
@@ -67,6 +68,9 @@
group: A short string to identify a group of workers.
exit_on_error: Exit the app if the worker raises an error. Set to `False` to suppress exceptions.
exclusive: Cancel all workers in the same group.
+ description: Readable description of the worker for debugging purposes.
+ By default, it uses a string representation of the decorated method
+ and its arguments.
"""
def decorator(
@@ -87,22 +91,25 @@
self = args[0]
assert isinstance(self, DOMNode)
- try:
- positional_arguments = ", ".join(repr(arg) for arg in args[1:])
- keyword_arguments = ", ".join(
- f"{name}={value!r}" for name, value in kwargs.items()
- )
- tokens = [positional_arguments, keyword_arguments]
- worker_description = f"{method.__name__}({', '.join(token for token in tokens if token)})"
- except Exception:
- worker_description = "<worker>"
+ if description is not None:
+ debug_description = description
+ else:
+ try:
+ positional_arguments = ", ".join(repr(arg) for arg in args[1:])
+ keyword_arguments = ", ".join(
+ f"{name}={value!r}" for name, value in kwargs.items()
+ )
+ tokens = [positional_arguments, keyword_arguments]
+ debug_description = f"{method.__name__}({', '.join(token for token in tokens if token)})"
+ except Exception:
+ debug_description = "<worker>"
worker = cast(
"Worker[ReturnType]",
self.run_worker(
partial(method, *args, **kwargs),
name=name or method.__name__,
group=group,
- description=worker_description,
+ description=debug_description,
exclusive=exclusive,
exit_on_error=exit_on_error,
),
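A hypothetical usage sketch, assuming the diff above is applied: the caller can now pass a fixed `description` instead of relying on the auto-generated one (the app and method names below are invented, and the top-level `work` import assumes the public re-export).

```python
from textual import work
from textual.app import App


class ResultsApp(App):
    @work(exclusive=True, description="fetch the latest results")
    async def fetch_results(self) -> None:
        ...  # long-running task; the worker list shows the fixed description
```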
| {"golden_diff": "diff --git a/src/textual/_work_decorator.py b/src/textual/_work_decorator.py\n--- a/src/textual/_work_decorator.py\n+++ b/src/textual/_work_decorator.py\n@@ -58,6 +58,7 @@\n group: str = \"default\",\n exit_on_error: bool = True,\n exclusive: bool = False,\n+ description: str | None = None,\n ) -> Callable[FactoryParamSpec, Worker[ReturnType]] | Decorator:\n \"\"\"A decorator used to create [workers](/guide/workers).\n \n@@ -67,6 +68,9 @@\n group: A short string to identify a group of workers.\n exit_on_error: Exit the app if the worker raises an error. Set to `False` to suppress exceptions.\n exclusive: Cancel all workers in the same group.\n+ description: Readable description of the worker for debugging purposes.\n+ By default, it uses a string representation of the decorated method\n+ and its arguments.\n \"\"\"\n \n def decorator(\n@@ -87,22 +91,25 @@\n self = args[0]\n assert isinstance(self, DOMNode)\n \n- try:\n- positional_arguments = \", \".join(repr(arg) for arg in args[1:])\n- keyword_arguments = \", \".join(\n- f\"{name}={value!r}\" for name, value in kwargs.items()\n- )\n- tokens = [positional_arguments, keyword_arguments]\n- worker_description = f\"{method.__name__}({', '.join(token for token in tokens if token)})\"\n- except Exception:\n- worker_description = \"<worker>\"\n+ if description is not None:\n+ debug_description = description\n+ else:\n+ try:\n+ positional_arguments = \", \".join(repr(arg) for arg in args[1:])\n+ keyword_arguments = \", \".join(\n+ f\"{name}={value!r}\" for name, value in kwargs.items()\n+ )\n+ tokens = [positional_arguments, keyword_arguments]\n+ debug_description = f\"{method.__name__}({', '.join(token for token in tokens if token)})\"\n+ except Exception:\n+ debug_description = \"<worker>\"\n worker = cast(\n \"Worker[ReturnType]\",\n self.run_worker(\n partial(method, *args, **kwargs),\n name=name or method.__name__,\n group=group,\n- description=worker_description,\n+ description=debug_description,\n exclusive=exclusive,\n exit_on_error=exit_on_error,\n ),\n", "issue": "Add a `description` parameter to the work decorator, to use in place of the auto-generated description.\n\n", "before_files": [{"content": "\"\"\"\n\nA decorator used to create [workers](/guide/workers).\n\"\"\"\n\n\nfrom __future__ import annotations\n\nfrom functools import partial, wraps\nfrom typing import TYPE_CHECKING, Callable, Coroutine, TypeVar, Union, cast, overload\n\nfrom typing_extensions import ParamSpec, TypeAlias\n\nif TYPE_CHECKING:\n from .worker import Worker\n\n\nFactoryParamSpec = ParamSpec(\"FactoryParamSpec\")\nDecoratorParamSpec = ParamSpec(\"DecoratorParamSpec\")\nReturnType = TypeVar(\"ReturnType\")\n\nDecorator: TypeAlias = Callable[\n [\n Union[\n Callable[DecoratorParamSpec, ReturnType],\n Callable[DecoratorParamSpec, Coroutine[None, None, ReturnType]],\n ]\n ],\n Callable[DecoratorParamSpec, \"Worker[ReturnType]\"],\n]\n\n\n@overload\ndef work(\n method: Callable[FactoryParamSpec, Coroutine[None, None, ReturnType]]\n) -> Callable[FactoryParamSpec, \"Worker[ReturnType]\"]:\n ...\n\n\n@overload\ndef work(\n method: Callable[FactoryParamSpec, ReturnType]\n) -> Callable[FactoryParamSpec, \"Worker[ReturnType]\"]:\n ...\n\n\n@overload\ndef work(*, exclusive: bool = False) -> Decorator[..., ReturnType]:\n ...\n\n\ndef work(\n method: Callable[FactoryParamSpec, ReturnType]\n | Callable[FactoryParamSpec, Coroutine[None, None, ReturnType]]\n | None = None,\n *,\n name: str = \"\",\n group: str = \"default\",\n exit_on_error: bool = 
True,\n exclusive: bool = False,\n) -> Callable[FactoryParamSpec, Worker[ReturnType]] | Decorator:\n \"\"\"A decorator used to create [workers](/guide/workers).\n\n Args:\n method: A function or coroutine.\n name: A short string to identify the worker (in logs and debugging).\n group: A short string to identify a group of workers.\n exit_on_error: Exit the app if the worker raises an error. Set to `False` to suppress exceptions.\n exclusive: Cancel all workers in the same group.\n \"\"\"\n\n def decorator(\n method: (\n Callable[DecoratorParamSpec, ReturnType]\n | Callable[DecoratorParamSpec, Coroutine[None, None, ReturnType]]\n )\n ) -> Callable[DecoratorParamSpec, Worker[ReturnType]]:\n \"\"\"The decorator.\"\"\"\n\n @wraps(method)\n def decorated(\n *args: DecoratorParamSpec.args, **kwargs: DecoratorParamSpec.kwargs\n ) -> Worker[ReturnType]:\n \"\"\"The replaced callable.\"\"\"\n from .dom import DOMNode\n\n self = args[0]\n assert isinstance(self, DOMNode)\n\n try:\n positional_arguments = \", \".join(repr(arg) for arg in args[1:])\n keyword_arguments = \", \".join(\n f\"{name}={value!r}\" for name, value in kwargs.items()\n )\n tokens = [positional_arguments, keyword_arguments]\n worker_description = f\"{method.__name__}({', '.join(token for token in tokens if token)})\"\n except Exception:\n worker_description = \"<worker>\"\n worker = cast(\n \"Worker[ReturnType]\",\n self.run_worker(\n partial(method, *args, **kwargs),\n name=name or method.__name__,\n group=group,\n description=worker_description,\n exclusive=exclusive,\n exit_on_error=exit_on_error,\n ),\n )\n return worker\n\n return decorated\n\n if method is None:\n return decorator\n else:\n return decorator(method)\n", "path": "src/textual/_work_decorator.py"}]} | 1,560 | 546 |
gh_patches_debug_4548 | rasdani/github-patches | git_diff | capitalone__DataProfiler-739 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Windows Install error - ValueError: path 'resources/' cannot end with '/'
https://github.com/capitalone/DataProfiler/blob/5b04b7fe5ee3556235c397efb69b32cd5d364a3b/setup.py#L33
Ran into an install issue:
ValueError: path 'resources/' cannot end with '/'
As per
https://stackoverflow.com/questions/20356482/valueerror-path-conf-cannot-end-with
resource_dir = "resources/"
needs to change to
resource_dir = "resources"
Thank you.
</issue>
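The issue itself names the fix: drop the trailing separator so setuptools accepts the `data_files` directories on Windows. A minimal sketch of that change (not necessarily the project's final patch) is:

```python
import os

# assumption: only the trailing "/" trips setuptools' path validation on Windows
resource_dir = "resources"  # previously "resources/"
default_labeler_files = [
    (d, [os.path.join(d, f) for f in files]) for d, _, files in os.walk(resource_dir)
]
```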
<code>
[start of setup.py]
1 """A setuptools for the Data Profiler Application and Python Libraries."""
2
3 import os
4
5 # To use a consistent encoding
6 from codecs import open
7 from os import path
8
9 # Always prefer setuptools over distutils
10 from setuptools import find_packages, setup
11
12 # Load package version
13 from dataprofiler.version import __version__
14
15 here = path.abspath(path.dirname(__file__))
16
17 # Get the long description from the README file
18 with open(path.join(here, "README.md"), encoding="utf-8") as f:
19 LONG_DESCRIPTION = f.read()
20
21 # Get the install_requirements from requirements.txt
22 with open(path.join(here, "requirements.txt"), encoding="utf-8") as f:
23 required_packages = f.read().splitlines()
24
25 # Get the install_requirements from requirements-ml.txt
26 with open(path.join(here, "requirements-ml.txt"), encoding="utf-8") as f:
27 ml_packages = f.read().splitlines()
28
29 # Get the install_requirements from requirements-reports.txt
30 with open(path.join(here, "requirements-reports.txt"), encoding="utf-8") as f:
31 reports_packages = f.read().splitlines()
32
33 resource_dir = "resources/"
34 default_labeler_files = [
35 (d, [os.path.join(d, f) for f in files]) for d, _, files in os.walk(resource_dir)
36 ]
37
38
39 DESCRIPTION = (
40 "What is in your data? Detect schema, statistics and entities in almost any file."
41 )
42
43 setup(
44 name="DataProfiler",
45 version=__version__,
46 python_requires=">=3.8",
47 description=DESCRIPTION,
48 long_description=LONG_DESCRIPTION,
49 long_description_content_type="text/markdown",
50 # The project's main homepage.
51 url="https://github.com/capitalone/data-profiler",
52 # Author details
53 author="Jeremy Goodsitt, Taylor Turner, Michael Davis, Kenny Bean, Tyler Farnan",
54 # Choose your license
55 license="Apache License, Version 2.0",
56 # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
57 classifiers=[
58 # How mature is this project? Common values are
59 # 3 - Alpha
60 # 4 - Beta
61 # 5 - Production/Stable
62 "Development Status :: 5 - Production/Stable",
63 # Indicate who your project is intended for
64 "Intended Audience :: Developers",
65 "Intended Audience :: Education",
66 "Intended Audience :: Information Technology",
67 "Intended Audience :: Science/Research",
68 "Intended Audience :: System Administrators",
69 "Topic :: Education",
70 "Topic :: Scientific/Engineering",
71 "Topic :: Scientific/Engineering :: Information Analysis",
72 "Topic :: Security",
73 "Topic :: Software Development :: Build Tools",
74 # Pick your license as you wish (should match "license" above)
75 "License :: OSI Approved :: Apache Software License",
76 # Specify the Python versions you support here. In particular, ensure
77 # that you indicate whether you support Python 3 or both.
78 "Programming Language :: Python :: 3",
79 ],
80 # What does your project relate to?
81 keywords="Data Investigation",
82 # You can just specify the packages manually here if your project is
83 # simple. Or you can use find_packages().
84 # packages=find_packages(exclude=['src/test', 'src/sample']),
85 packages=find_packages(exclude=["tests", "examples"]),
86 # List run-time dependencies here. These will be installed by pip when
87 # your project is installed. For an analysis of "install_requires" vs pip's
88 # requirements files see:
89 # https://packaging.python.org/en/latest/requirements.html
90 install_requires=required_packages,
91 # List of run-time dependencies for the labeler. These will be installed
92 # by pip when someone installs the project[<label>].
93 extras_require={
94 "ml": ml_packages,
95 "reports": reports_packages,
96 "full": ml_packages + reports_packages,
97 },
98 # # If there are data files included in your packages that need to be
99 # # installed, specify them here. If using Python 2.6 or less, then these
100 # # have to be included in MANIFEST.in as well.
101 # package_data={
102 # 'data': [],
103 # },
104 #
105 # # Although 'package_data' is the preferred approach, in some case you may
106 # # need to place data files outside of your packages. See:
107 # # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
108 # # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
109 data_files=default_labeler_files,
110 include_package_data=True,
111 )
112
113 print("find_packages():", find_packages())
114
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,7 @@
with open(path.join(here, "requirements-reports.txt"), encoding="utf-8") as f:
reports_packages = f.read().splitlines()
-resource_dir = "resources/"
+resource_dir = "resources"
default_labeler_files = [
(d, [os.path.join(d, f) for f in files]) for d, _, files in os.walk(resource_dir)
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,7 +30,7 @@\n with open(path.join(here, \"requirements-reports.txt\"), encoding=\"utf-8\") as f:\n reports_packages = f.read().splitlines()\n \n-resource_dir = \"resources/\"\n+resource_dir = \"resources\"\n default_labeler_files = [\n (d, [os.path.join(d, f) for f in files]) for d, _, files in os.walk(resource_dir)\n ]\n", "issue": "Windows Install error - ValueError: path 'resources/' cannot end with '/\nhttps://github.com/capitalone/DataProfiler/blob/5b04b7fe5ee3556235c397efb69b32cd5d364a3b/setup.py#L33\r\n\r\nRan into an install isue \r\nValueError: path 'resources/' cannot end with '/\r\n\r\nAs per \r\nhttps://stackoverflow.com/questions/20356482/valueerror-path-conf-cannot-end-with\r\n\r\nresource_dir = \"resources/\"\r\nneeds to change to \r\nresource_dir = \"resources\"\r\n\r\nThank you. \n", "before_files": [{"content": "\"\"\"A setuptools for the Data Profiler Application and Python Libraries.\"\"\"\n\nimport os\n\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path\n\n# Always prefer setuptools over distutils\nfrom setuptools import find_packages, setup\n\n# Load package version\nfrom dataprofiler.version import __version__\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Get the install_requirements from requirements.txt\nwith open(path.join(here, \"requirements.txt\"), encoding=\"utf-8\") as f:\n required_packages = f.read().splitlines()\n\n# Get the install_requirements from requirements-ml.txt\nwith open(path.join(here, \"requirements-ml.txt\"), encoding=\"utf-8\") as f:\n ml_packages = f.read().splitlines()\n\n# Get the install_requirements from requirements-reports.txt\nwith open(path.join(here, \"requirements-reports.txt\"), encoding=\"utf-8\") as f:\n reports_packages = f.read().splitlines()\n\nresource_dir = \"resources/\"\ndefault_labeler_files = [\n (d, [os.path.join(d, f) for f in files]) for d, _, files in os.walk(resource_dir)\n]\n\n\nDESCRIPTION = (\n \"What is in your data? Detect schema, statistics and entities in almost any file.\"\n)\n\nsetup(\n name=\"DataProfiler\",\n version=__version__,\n python_requires=\">=3.8\",\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n # The project's main homepage.\n url=\"https://github.com/capitalone/data-profiler\",\n # Author details\n author=\"Jeremy Goodsitt, Taylor Turner, Michael Davis, Kenny Bean, Tyler Farnan\",\n # Choose your license\n license=\"Apache License, Version 2.0\",\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n # How mature is this project? 
Common values are\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n \"Development Status :: 5 - Production/Stable\",\n # Indicate who your project is intended for\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: System Administrators\",\n \"Topic :: Education\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n # Pick your license as you wish (should match \"license\" above)\n \"License :: OSI Approved :: Apache Software License\",\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 3 or both.\n \"Programming Language :: Python :: 3\",\n ],\n # What does your project relate to?\n keywords=\"Data Investigation\",\n # You can just specify the packages manually here if your project is\n # simple. Or you can use find_packages().\n # packages=find_packages(exclude=['src/test', 'src/sample']),\n packages=find_packages(exclude=[\"tests\", \"examples\"]),\n # List run-time dependencies here. These will be installed by pip when\n # your project is installed. For an analysis of \"install_requires\" vs pip's\n # requirements files see:\n # https://packaging.python.org/en/latest/requirements.html\n install_requires=required_packages,\n # List of run-time dependencies for the labeler. These will be installed\n # by pip when someone installs the project[<label>].\n extras_require={\n \"ml\": ml_packages,\n \"reports\": reports_packages,\n \"full\": ml_packages + reports_packages,\n },\n # # If there are data files included in your packages that need to be\n # # installed, specify them here. If using Python 2.6 or less, then these\n # # have to be included in MANIFEST.in as well.\n # package_data={\n # 'data': [],\n # },\n #\n # # Although 'package_data' is the preferred approach, in some case you may\n # # need to place data files outside of your packages. See:\n # # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa\n # # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'\n data_files=default_labeler_files,\n include_package_data=True,\n)\n\nprint(\"find_packages():\", find_packages())\n", "path": "setup.py"}]} | 1,931 | 113 |
gh_patches_debug_4972 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3342 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider superonefoods is broken
During the global build at 2021-09-22-14-42-27, spider **superonefoods** failed with **0 features** and **1 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-09-22-14-42-27/logs/superonefoods.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-22-14-42-27/output/superonefoods.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-22-14-42-27/output/superonefoods.geojson))
</issue>
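A run that yields 0 features typically means the page layout changed and the hard-coded selector no longer matches. As a sketch only, not the project's actual fix, the script tag could be located by its content instead of by position, which is less brittle:

```python
# hypothetical: find the <script> that defines the store data rather than
# indexing all script tags by position
items = response.xpath(
    '//script[contains(., "var stores")]/text()'
).re(r"var stores =(.+?);\n")
```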
<code>
[start of locations/spiders/superonefoods.py]
1 # -*- coding: utf-8 -*-
2 import json
3 import scrapy
4
5 from locations.items import GeojsonPointItem
6
7
8 class SuperonefoodsSpider(scrapy.Spider):
9 name = "superonefoods"
10 item_attributes = { 'brand': "Super One Foods" }
11 allowed_domains = ["www.superonefoods.com"]
12 start_urls = (
13 'https://www.superonefoods.com/store-finder',
14 )
15
16 def parse(self, response):
17 # retrieve js data variable from script tag
18 items = response.xpath('//script/text()')[3].re("var stores =(.+?);\n")
19
20 # convert data variable from unicode to string
21 items = [str(x) for x in items]
22
23 # convert type string representation of list to type list
24 data = [items[0]]
25
26 # load list into json object for parsing
27 jsondata = json.loads(data[0])
28
29 # loop through json data object and retrieve values; yield the values to GeojsonPointItem
30 for item in jsondata:
31 yield GeojsonPointItem(
32 ref=item.get('_id'),
33 lat=float(item.get('latitude')),
34 lon=float(item.get('longitude')),
35 addr_full=item.get('address'),
36 city=item.get('city'),
37 state=item.get('state'),
38 postcode=item.get('zip'),
39 website='https://www.superonefoods.com/store-details/'+item.get('url'),
40 )
41
[end of locations/spiders/superonefoods.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/superonefoods.py b/locations/spiders/superonefoods.py
--- a/locations/spiders/superonefoods.py
+++ b/locations/spiders/superonefoods.py
@@ -15,7 +15,7 @@
def parse(self, response):
# retrieve js data variable from script tag
- items = response.xpath('//script/text()')[3].re("var stores =(.+?);\n")
+ items = response.xpath('//script/text()')[4].re("var stores =(.+?);\n")
# convert data variable from unicode to string
items = [str(x) for x in items]
| {"golden_diff": "diff --git a/locations/spiders/superonefoods.py b/locations/spiders/superonefoods.py\n--- a/locations/spiders/superonefoods.py\n+++ b/locations/spiders/superonefoods.py\n@@ -15,7 +15,7 @@\n \n def parse(self, response):\n # retrieve js data variable from script tag\n- items = response.xpath('//script/text()')[3].re(\"var stores =(.+?);\\n\")\n+ items = response.xpath('//script/text()')[4].re(\"var stores =(.+?);\\n\")\n \n # convert data variable from unicode to string\n items = [str(x) for x in items]\n", "issue": "Spider superonefoods is broken\nDuring the global build at 2021-09-22-14-42-27, spider **superonefoods** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-09-22-14-42-27/logs/superonefoods.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-22-14-42-27/output/superonefoods.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-22-14-42-27/output/superonefoods.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass SuperonefoodsSpider(scrapy.Spider):\n name = \"superonefoods\"\n item_attributes = { 'brand': \"Super One Foods\" }\n allowed_domains = [\"www.superonefoods.com\"]\n start_urls = (\n 'https://www.superonefoods.com/store-finder',\n )\n\n def parse(self, response):\n # retrieve js data variable from script tag\n items = response.xpath('//script/text()')[3].re(\"var stores =(.+?);\\n\")\n\n # convert data variable from unicode to string\n items = [str(x) for x in items]\n\n # convert type string representation of list to type list\n data = [items[0]]\n\n # load list into json object for parsing\n jsondata = json.loads(data[0])\n\n # loop through json data object and retrieve values; yield the values to GeojsonPointItem\n for item in jsondata:\n yield GeojsonPointItem(\n ref=item.get('_id'),\n lat=float(item.get('latitude')),\n lon=float(item.get('longitude')),\n addr_full=item.get('address'),\n city=item.get('city'),\n state=item.get('state'),\n postcode=item.get('zip'),\n website='https://www.superonefoods.com/store-details/'+item.get('url'),\n )\n", "path": "locations/spiders/superonefoods.py"}]} | 1,104 | 149 |
gh_patches_debug_31326 | rasdani/github-patches | git_diff | apluslms__a-plus-575 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Compress HTML pages in the cache
Exercise and chapter pages are stored in the cache and are only updated when the backend reports a change. Some pages can be large (e.g. 1 MB) but compress relatively well. The Memcached API supports on-the-fly compression, but that is not usable through the Django cache API.
Thus, we should at least compress the HTML content manually. Alternatively, we could specialize `CachedAbstract` for Memcached, which would also allow us to use the `cas` operation.
Relevant files:
* `lib/cache/cached.py`
* `exercise/cache/exercise.py` (`content` in `_generate_data(...)` and `content()`)
</issue>
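To illustrate the manual-compression idea from the issue above, a minimal sketch using the standard-library `zlib` (the project could equally pick `lz4`; the helper names are invented) would compress the page content before it is stored in `_generate_data()` and decompress it again in `content()`:

```python
import zlib


def compress_html(content: str) -> bytes:
    # level 1 favours speed over ratio, which suits per-request cache writes
    return zlib.compress(content.encode('utf-8'), 1)


def decompress_html(blob: bytes) -> str:
    return zlib.decompress(blob).decode('utf-8')
```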
<code>
[start of exercise/cache/exercise.py]
1 import time
2 from django.conf import settings
3 from django.db.models.signals import post_save, post_delete
4
5 from lib.cache import CachedAbstract
6 from lib.remote_page import RemotePageNotModified
7 from ..protocol.aplus import load_exercise_page
8
9
10 class ExerciseCache(CachedAbstract):
11 """ Exercise HTML content """
12 KEY_PREFIX = "exercise"
13
14 def __init__(self, exercise, language, request, students, url_name):
15 self.exercise = exercise
16 self.load_args = [language, request, students, url_name]
17 super().__init__(exercise, modifiers=[language])
18
19 def _needs_generation(self, data):
20 expires = data['expires'] if data else None
21 return not expires or time.time() > expires
22
23 def _generate_data(self, exercise, data=None):
24 try:
25 page = exercise.load_page(
26 *self.load_args,
27 last_modified=data['last_modified'] if data else None
28 )
29 return {
30 'head': page.head,
31 'content': page.content,
32 'last_modified': page.last_modified,
33 'expires': page.expires if page.is_loaded else 0,
34 }
35 except RemotePageNotModified as e:
36 if e.expires:
37 data['expires'] = e.expires
38 return data
39
40 def head(self):
41 return self.data['head']
42
43 def content(self):
44 return self.data['content']
45
46
47 def invalidate_instance(instance):
48 for module in instance.course_modules.all():
49 for exercise in module.learning_objects.all():
50 for language,_ in settings.LANGUAGES:
51 ExerciseCache.invalidate(exercise, modifiers=[language])
52
[end of exercise/cache/exercise.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/exercise/cache/exercise.py b/exercise/cache/exercise.py
--- a/exercise/cache/exercise.py
+++ b/exercise/cache/exercise.py
@@ -1,4 +1,6 @@
+import logging
import time
+
from django.conf import settings
from django.db.models.signals import post_save, post_delete
@@ -6,6 +8,18 @@
from lib.remote_page import RemotePageNotModified
from ..protocol.aplus import load_exercise_page
+logger = logging.getLogger('aplus.cached')
+
+try:
+ from lz4.block import compress as _compress, decompress
+ def compress(data):
+ return _compress(data, compression=1)
+except ImportError:
+ logger.warning("Unable to import lz4, using a slower zlib instead")
+ from zlib import compress as _compress, decompress
+ def compress(data):
+ return _compress(data, level=1)
+
class ExerciseCache(CachedAbstract):
""" Exercise HTML content """
@@ -26,9 +40,12 @@
*self.load_args,
last_modified=data['last_modified'] if data else None
)
+
+ content = compress(page.content.encode('utf-8'))
+
return {
'head': page.head,
- 'content': page.content,
+ 'content': content,
'last_modified': page.last_modified,
'expires': page.expires if page.is_loaded else 0,
}
@@ -41,7 +58,8 @@
return self.data['head']
def content(self):
- return self.data['content']
+ content = decompress(self.data['content']).decode('utf-8')
+ return content
def invalidate_instance(instance):
| {"golden_diff": "diff --git a/exercise/cache/exercise.py b/exercise/cache/exercise.py\n--- a/exercise/cache/exercise.py\n+++ b/exercise/cache/exercise.py\n@@ -1,4 +1,6 @@\n+import logging\n import time\n+\n from django.conf import settings\n from django.db.models.signals import post_save, post_delete\n \n@@ -6,6 +8,18 @@\n from lib.remote_page import RemotePageNotModified\n from ..protocol.aplus import load_exercise_page\n \n+logger = logging.getLogger('aplus.cached')\n+\n+try:\n+ from lz4.block import compress as _compress, decompress\n+ def compress(data):\n+ return _compress(data, compression=1)\n+except ImportError:\n+ logger.warning(\"Unable to import lz4, using a slower zlib instead\")\n+ from zlib import compress as _compress, decompress\n+ def compress(data):\n+ return _compress(data, level=1)\n+\n \n class ExerciseCache(CachedAbstract):\n \"\"\" Exercise HTML content \"\"\"\n@@ -26,9 +40,12 @@\n *self.load_args,\n last_modified=data['last_modified'] if data else None\n )\n+\n+ content = compress(page.content.encode('utf-8'))\n+\n return {\n 'head': page.head,\n- 'content': page.content,\n+ 'content': content,\n 'last_modified': page.last_modified,\n 'expires': page.expires if page.is_loaded else 0,\n }\n@@ -41,7 +58,8 @@\n return self.data['head']\n \n def content(self):\n- return self.data['content']\n+ content = decompress(self.data['content']).decode('utf-8')\n+ return content\n \n \n def invalidate_instance(instance):\n", "issue": "Compress HTML pages in the cache\nExercise and chapter pages are stored in the cache and only update if the backend reports that there is a change. Some pages might be large (e.g. 1M), but do compress relatively well. Memcached API supports compression on the fly, but that is not usable over django API.\r\n\r\nThus, we should at least compress HTML content manually. 
Alternatively, we can specialize `CachedAbstract` for memcached, which would also allow us to use `cas` operation.\r\n\r\nRelevant files:\r\n* `lib/cache/cached.py`\r\n* `exercise/cache/exercise.py` (`content` in `_generate_data(...)` and `content()`)\n", "before_files": [{"content": "import time\nfrom django.conf import settings\nfrom django.db.models.signals import post_save, post_delete\n\nfrom lib.cache import CachedAbstract\nfrom lib.remote_page import RemotePageNotModified\nfrom ..protocol.aplus import load_exercise_page\n\n\nclass ExerciseCache(CachedAbstract):\n \"\"\" Exercise HTML content \"\"\"\n KEY_PREFIX = \"exercise\"\n\n def __init__(self, exercise, language, request, students, url_name):\n self.exercise = exercise\n self.load_args = [language, request, students, url_name]\n super().__init__(exercise, modifiers=[language])\n\n def _needs_generation(self, data):\n expires = data['expires'] if data else None\n return not expires or time.time() > expires\n\n def _generate_data(self, exercise, data=None):\n try:\n page = exercise.load_page(\n *self.load_args,\n last_modified=data['last_modified'] if data else None\n )\n return {\n 'head': page.head,\n 'content': page.content,\n 'last_modified': page.last_modified,\n 'expires': page.expires if page.is_loaded else 0,\n }\n except RemotePageNotModified as e:\n if e.expires:\n data['expires'] = e.expires\n return data\n\n def head(self):\n return self.data['head']\n\n def content(self):\n return self.data['content']\n\n\ndef invalidate_instance(instance):\n for module in instance.course_modules.all():\n for exercise in module.learning_objects.all():\n for language,_ in settings.LANGUAGES:\n ExerciseCache.invalidate(exercise, modifiers=[language])\n", "path": "exercise/cache/exercise.py"}]} | 1,110 | 377 |
gh_patches_debug_8197 | rasdani/github-patches | git_diff | sanic-org__sanic-2438 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Easier websocket interface annotation
Right now, to properly annotate a websocket endpoint you need to do this:
```python
from sanic.server.websockets.impl import WebsocketImplProtocol
from sanic import Request
@app.websocket("")
async def handler(request: Request, ws: WebsocketImplProtocol):
...
```
That is not easy or intuitive.
This would be much nicer:
```python
from sanic import Request, Websocket
@app.websocket("")
async def handler(request: Request, ws: Websocket):
...
```
We should just alias and put it inside `__init__.py` for convenience.
</issue>
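Assuming the alias is re-exported from `sanic/__init__.py` as proposed, handler code could then be annotated like this (a sketch; the app, route, and echo logic are invented):

```python
from sanic import Request, Sanic, Websocket

app = Sanic("Example")


@app.websocket("/feed")
async def feed(request: Request, ws: Websocket):
    while True:
        message = await ws.recv()  # WebsocketImplProtocol provides recv()/send()
        await ws.send(message)
```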
<code>
[start of sanic/__init__.py]
1 from sanic.__version__ import __version__
2 from sanic.app import Sanic
3 from sanic.blueprints import Blueprint
4 from sanic.constants import HTTPMethod
5 from sanic.request import Request
6 from sanic.response import HTTPResponse, html, json, text
7
8
9 __all__ = (
10 "__version__",
11 "Sanic",
12 "Blueprint",
13 "HTTPMethod",
14 "HTTPResponse",
15 "Request",
16 "html",
17 "json",
18 "text",
19 )
20
[end of sanic/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sanic/__init__.py b/sanic/__init__.py
--- a/sanic/__init__.py
+++ b/sanic/__init__.py
@@ -4,6 +4,7 @@
from sanic.constants import HTTPMethod
from sanic.request import Request
from sanic.response import HTTPResponse, html, json, text
+from sanic.server.websockets.impl import WebsocketImplProtocol as Websocket
__all__ = (
@@ -13,6 +14,7 @@
"HTTPMethod",
"HTTPResponse",
"Request",
+ "Websocket",
"html",
"json",
"text",
| {"golden_diff": "diff --git a/sanic/__init__.py b/sanic/__init__.py\n--- a/sanic/__init__.py\n+++ b/sanic/__init__.py\n@@ -4,6 +4,7 @@\n from sanic.constants import HTTPMethod\n from sanic.request import Request\n from sanic.response import HTTPResponse, html, json, text\n+from sanic.server.websockets.impl import WebsocketImplProtocol as Websocket\n \n \n __all__ = (\n@@ -13,6 +14,7 @@\n \"HTTPMethod\",\n \"HTTPResponse\",\n \"Request\",\n+ \"Websocket\",\n \"html\",\n \"json\",\n \"text\",\n", "issue": "Easier websocket interface annotation\nRight now, to properly annotate a websocket endpoint you need to do this:\r\n\r\n```python\r\nfrom sanic.server.websockets.impl import WebsocketImplProtocol\r\nfrom sanic import Request\r\n\r\[email protected](\"\")\r\nasync def handler(request: Request, ws: WebsocketImplProtocol):\r\n ...\r\n```\r\n\r\nThat is not easy or intuitive.\r\n\r\nThis would be much nicer:\r\n\r\n```python\r\nfrom sanic import Request, Websocket\r\n\r\[email protected](\"\")\r\nasync def handler(request: Request, ws: Websocket):\r\n ...\r\n```\r\n\r\nWe should just alias and put it inside `__init__.py` for convenience.\n", "before_files": [{"content": "from sanic.__version__ import __version__\nfrom sanic.app import Sanic\nfrom sanic.blueprints import Blueprint\nfrom sanic.constants import HTTPMethod\nfrom sanic.request import Request\nfrom sanic.response import HTTPResponse, html, json, text\n\n\n__all__ = (\n \"__version__\",\n \"Sanic\",\n \"Blueprint\",\n \"HTTPMethod\",\n \"HTTPResponse\",\n \"Request\",\n \"html\",\n \"json\",\n \"text\",\n)\n", "path": "sanic/__init__.py"}]} | 798 | 142 |
gh_patches_debug_1060 | rasdani/github-patches | git_diff | wagtail__wagtail-1791 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cachebusting query parameter (e.g. _=1441835249458) not ignored by api
From the [documentation for jQuery.ajax, under "cache"](http://api.jquery.com/jquery.ajax/):
> Setting cache to false will only work correctly with HEAD and GET requests. It works by appending "_={timestamp}" to the GET parameters. The parameter is not needed for other types of requests, except in IE8 when a POST is made to a URL that has already been requested by a GET.
It seems like it's standard practice to ignore the underscore keyword. Unless I'm mistaken this is an oversight and not a disagreement on the principle of the thing.
Reproduce: make an Ajax call to any wagtail API endpoint with the cache flag set to false. Or just navigate to something like `http://localhost:8000/api/v1/pages/?type=home.HomePage&_=1441835249458`
You'll get this message:
```
{
"message": "query parameter is not an operation or a recognised field: _"
}
```
</issue>
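One way to make the API ignore the parameter, sketched here as the kind of change the issue implies rather than the final patch, is to whitelist `_` alongside the other known query parameters in `BaseAPIEndpoint`:

```python
known_query_parameters = frozenset([
    'limit',
    'offset',
    'fields',
    'order',
    'search',
    '_',  # jQuery appends _={timestamp} when cache is disabled; ignore it
])
```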
<code>
[start of wagtail/contrib/wagtailapi/endpoints.py]
1 from __future__ import absolute_import
2
3 from collections import OrderedDict
4
5 from django.conf.urls import url
6 from django.http import Http404
7
8 from rest_framework import status
9 from rest_framework.response import Response
10 from rest_framework.viewsets import GenericViewSet
11
12 from wagtail.wagtailcore.models import Page
13 from wagtail.wagtailimages.models import get_image_model
14 from wagtail.wagtaildocs.models import Document
15 from wagtail.wagtailcore.utils import resolve_model_string
16
17 from .filters import (
18 FieldsFilter, OrderingFilter, SearchFilter,
19 ChildOfFilter, DescendantOfFilter
20 )
21 from .renderers import WagtailJSONRenderer
22 from .pagination import WagtailPagination
23 from .serializers import BaseSerializer, PageSerializer, DocumentSerializer, ImageSerializer, get_serializer_class
24 from .utils import BadRequestError
25
26
27 class BaseAPIEndpoint(GenericViewSet):
28 renderer_classes = [WagtailJSONRenderer]
29 pagination_class = WagtailPagination
30 base_serializer_class = BaseSerializer
31 filter_classes = []
32 queryset = None # Set on subclasses or implement `get_queryset()`.
33
34 known_query_parameters = frozenset([
35 'limit',
36 'offset',
37 'fields',
38 'order',
39 'search',
40 ])
41 extra_api_fields = []
42 name = None # Set on subclass.
43
44 def listing_view(self, request):
45 queryset = self.get_queryset()
46 self.check_query_parameters(queryset)
47 queryset = self.filter_queryset(queryset)
48 queryset = self.paginate_queryset(queryset)
49 serializer = self.get_serializer(queryset, many=True)
50 return self.get_paginated_response(serializer.data)
51
52 def detail_view(self, request, pk):
53 instance = self.get_object()
54 serializer = self.get_serializer(instance)
55 return Response(serializer.data)
56
57 def handle_exception(self, exc):
58 if isinstance(exc, Http404):
59 data = {'message': str(exc)}
60 return Response(data, status=status.HTTP_404_NOT_FOUND)
61 elif isinstance(exc, BadRequestError):
62 data = {'message': str(exc)}
63 return Response(data, status=status.HTTP_400_BAD_REQUEST)
64 return super(BaseAPIEndpoint, self).handle_exception(exc)
65
66 def get_api_fields(self, model):
67 """
68 This returns a list of field names that are allowed to
69 be used in the API (excluding the id field).
70 """
71 api_fields = self.extra_api_fields[:]
72
73 if hasattr(model, 'api_fields'):
74 api_fields.extend(model.api_fields)
75
76 return api_fields
77
78 def check_query_parameters(self, queryset):
79 """
80 Ensure that only valid query paramters are included in the URL.
81 """
82 query_parameters = set(self.request.GET.keys())
83
84 # All query paramters must be either a field or an operation
85 allowed_query_parameters = set(self.get_api_fields(queryset.model)).union(self.known_query_parameters).union({'id'})
86 unknown_parameters = query_parameters - allowed_query_parameters
87 if unknown_parameters:
88 raise BadRequestError("query parameter is not an operation or a recognised field: %s" % ', '.join(sorted(unknown_parameters)))
89
90 def get_serializer_class(self):
91 request = self.request
92
93 # Get model
94 if self.action == 'listing_view':
95 model = self.get_queryset().model
96 else:
97 model = type(self.get_object())
98
99 # Get all available fields
100 all_fields = self.get_api_fields(model)
101 all_fields = list(OrderedDict.fromkeys(all_fields)) # Removes any duplicates in case the developer put "title" in api_fields
102
103 if self.action == 'listing_view':
104 # Listing views just show the title field and any other allowed field the user specified
105 if 'fields' in request.GET:
106 fields = set(request.GET['fields'].split(','))
107 else:
108 fields = {'title'}
109
110 unknown_fields = fields - set(all_fields)
111
112 if unknown_fields:
113 raise BadRequestError("unknown fields: %s" % ', '.join(sorted(unknown_fields)))
114
115 # Reorder fields so it matches the order of all_fields
116 fields = [field for field in all_fields if field in fields]
117 else:
118 # Detail views show all fields all the time
119 fields = all_fields
120
121 # Always show id and meta first
122 fields = ['id', 'meta'] + fields
123
124 # If showing details, add the parent field
125 if isinstance(self, PagesAPIEndpoint) and self.get_serializer_context().get('show_details', False):
126 fields.insert(2, 'parent')
127
128 return get_serializer_class(model, fields, base=self.base_serializer_class)
129
130 def get_serializer_context(self):
131 """
132 The serialization context differs between listing and detail views.
133 """
134 request = self.request
135
136 if self.action == 'listing_view':
137 return {
138 'request': request,
139 'view': self,
140 }
141
142 return {
143 'request': request,
144 'view': self,
145 'show_details': True
146 }
147
148 def get_renderer_context(self):
149 context = super(BaseAPIEndpoint, self).get_renderer_context()
150 context['endpoints'] = [
151 PagesAPIEndpoint,
152 ImagesAPIEndpoint,
153 DocumentsAPIEndpoint
154 ]
155 return context
156
157 @classmethod
158 def get_urlpatterns(cls):
159 """
160 This returns a list of URL patterns for the endpoint
161 """
162 return [
163 url(r'^$', cls.as_view({'get': 'listing_view'}), name='listing'),
164 url(r'^(?P<pk>\d+)/$', cls.as_view({'get': 'detail_view'}), name='detail'),
165 ]
166
167 @classmethod
168 def has_model(cls, model):
169 return NotImplemented
170
171
172 class PagesAPIEndpoint(BaseAPIEndpoint):
173 base_serializer_class = PageSerializer
174 filter_backends = [
175 FieldsFilter,
176 ChildOfFilter,
177 DescendantOfFilter,
178 OrderingFilter,
179 SearchFilter
180 ]
181 known_query_parameters = BaseAPIEndpoint.known_query_parameters.union([
182 'type',
183 'child_of',
184 'descendant_of',
185 ])
186 extra_api_fields = ['title']
187 name = 'pages'
188
189 def get_queryset(self):
190 request = self.request
191
192 # Allow pages to be filtered to a specific type
193 if 'type' not in request.GET:
194 model = Page
195 else:
196 model_name = request.GET['type']
197 try:
198 model = resolve_model_string(model_name)
199 except LookupError:
200 raise BadRequestError("type doesn't exist")
201 if not issubclass(model, Page):
202 raise BadRequestError("type doesn't exist")
203
204 # Get live pages that are not in a private section
205 queryset = model.objects.public().live()
206
207 # Filter by site
208 queryset = queryset.descendant_of(request.site.root_page, inclusive=True)
209
210 return queryset
211
212 def get_object(self):
213 base = super(PagesAPIEndpoint, self).get_object()
214 return base.specific
215
216 @classmethod
217 def has_model(cls, model):
218 return issubclass(model, Page)
219
220
221 class ImagesAPIEndpoint(BaseAPIEndpoint):
222 queryset = get_image_model().objects.all().order_by('id')
223 base_serializer_class = ImageSerializer
224 filter_backends = [FieldsFilter, OrderingFilter, SearchFilter]
225 extra_api_fields = ['title', 'tags', 'width', 'height']
226 name = 'images'
227
228 @classmethod
229 def has_model(cls, model):
230 return model == get_image_model()
231
232
233 class DocumentsAPIEndpoint(BaseAPIEndpoint):
234 queryset = Document.objects.all().order_by('id')
235 base_serializer_class = DocumentSerializer
236 filter_backends = [FieldsFilter, OrderingFilter, SearchFilter]
237 extra_api_fields = ['title', 'tags']
238 name = 'documents'
239
240 @classmethod
241 def has_model(cls, model):
242 return model == Document
243
[end of wagtail/contrib/wagtailapi/endpoints.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/contrib/wagtailapi/endpoints.py b/wagtail/contrib/wagtailapi/endpoints.py
--- a/wagtail/contrib/wagtailapi/endpoints.py
+++ b/wagtail/contrib/wagtailapi/endpoints.py
@@ -37,6 +37,9 @@
'fields',
'order',
'search',
+
+ # Used by jQuery for cache-busting. See #1671
+ '_',
])
extra_api_fields = []
name = None # Set on subclass.
| {"golden_diff": "diff --git a/wagtail/contrib/wagtailapi/endpoints.py b/wagtail/contrib/wagtailapi/endpoints.py\n--- a/wagtail/contrib/wagtailapi/endpoints.py\n+++ b/wagtail/contrib/wagtailapi/endpoints.py\n@@ -37,6 +37,9 @@\n 'fields',\n 'order',\n 'search',\n+\n+ # Used by jQuery for cache-busting. See #1671\n+ '_',\n ])\n extra_api_fields = []\n name = None # Set on subclass.\n", "issue": "Cachebusting query parameter (e.g. _=1441835249458) not ignored by api\nFrom the [documentation for jQuery.ajax, under \"cache\"](http://api.jquery.com/jquery.ajax/):\n\n> Setting cache to false will only work correctly with HEAD and GET requests. It works by appending \"_={timestamp}\" to the GET parameters. The parameter is not needed for other types of requests, except in IE8 when a POST is made to a URL that has already been requested by a GET.\n\nIt seems like it's standard practice to ignore the underscore keyword. Unless I'm mistaken this is an oversight and not a disagreement on the principle of the thing.\n\nReproduce: make an Ajax call to any wagtail API endpoint with the cache flag set to false. Or just navigate to something like `http://localhost:8000/api/v1/pages/?type=home.HomePage&_=1441835249458`\n\nYou'll get this message: \n\n```\n{\n \"message\": \"query parameter is not an operation or a recognised field: _\"\n}\n```\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom collections import OrderedDict\n\nfrom django.conf.urls import url\nfrom django.http import Http404\n\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailimages.models import get_image_model\nfrom wagtail.wagtaildocs.models import Document\nfrom wagtail.wagtailcore.utils import resolve_model_string\n\nfrom .filters import (\n FieldsFilter, OrderingFilter, SearchFilter,\n ChildOfFilter, DescendantOfFilter\n)\nfrom .renderers import WagtailJSONRenderer\nfrom .pagination import WagtailPagination\nfrom .serializers import BaseSerializer, PageSerializer, DocumentSerializer, ImageSerializer, get_serializer_class\nfrom .utils import BadRequestError\n\n\nclass BaseAPIEndpoint(GenericViewSet):\n renderer_classes = [WagtailJSONRenderer]\n pagination_class = WagtailPagination\n base_serializer_class = BaseSerializer\n filter_classes = []\n queryset = None # Set on subclasses or implement `get_queryset()`.\n\n known_query_parameters = frozenset([\n 'limit',\n 'offset',\n 'fields',\n 'order',\n 'search',\n ])\n extra_api_fields = []\n name = None # Set on subclass.\n\n def listing_view(self, request):\n queryset = self.get_queryset()\n self.check_query_parameters(queryset)\n queryset = self.filter_queryset(queryset)\n queryset = self.paginate_queryset(queryset)\n serializer = self.get_serializer(queryset, many=True)\n return self.get_paginated_response(serializer.data)\n\n def detail_view(self, request, pk):\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n return Response(serializer.data)\n\n def handle_exception(self, exc):\n if isinstance(exc, Http404):\n data = {'message': str(exc)}\n return Response(data, status=status.HTTP_404_NOT_FOUND)\n elif isinstance(exc, BadRequestError):\n data = {'message': str(exc)}\n return Response(data, status=status.HTTP_400_BAD_REQUEST)\n return super(BaseAPIEndpoint, self).handle_exception(exc)\n\n def get_api_fields(self, model):\n \"\"\"\n This returns a list of field names that are 
allowed to\n be used in the API (excluding the id field).\n \"\"\"\n api_fields = self.extra_api_fields[:]\n\n if hasattr(model, 'api_fields'):\n api_fields.extend(model.api_fields)\n\n return api_fields\n\n def check_query_parameters(self, queryset):\n \"\"\"\n Ensure that only valid query paramters are included in the URL.\n \"\"\"\n query_parameters = set(self.request.GET.keys())\n\n # All query paramters must be either a field or an operation\n allowed_query_parameters = set(self.get_api_fields(queryset.model)).union(self.known_query_parameters).union({'id'})\n unknown_parameters = query_parameters - allowed_query_parameters\n if unknown_parameters:\n raise BadRequestError(\"query parameter is not an operation or a recognised field: %s\" % ', '.join(sorted(unknown_parameters)))\n\n def get_serializer_class(self):\n request = self.request\n\n # Get model\n if self.action == 'listing_view':\n model = self.get_queryset().model\n else:\n model = type(self.get_object())\n\n # Get all available fields\n all_fields = self.get_api_fields(model)\n all_fields = list(OrderedDict.fromkeys(all_fields)) # Removes any duplicates in case the developer put \"title\" in api_fields\n\n if self.action == 'listing_view':\n # Listing views just show the title field and any other allowed field the user specified\n if 'fields' in request.GET:\n fields = set(request.GET['fields'].split(','))\n else:\n fields = {'title'}\n\n unknown_fields = fields - set(all_fields)\n\n if unknown_fields:\n raise BadRequestError(\"unknown fields: %s\" % ', '.join(sorted(unknown_fields)))\n\n # Reorder fields so it matches the order of all_fields\n fields = [field for field in all_fields if field in fields]\n else:\n # Detail views show all fields all the time\n fields = all_fields\n\n # Always show id and meta first\n fields = ['id', 'meta'] + fields\n\n # If showing details, add the parent field\n if isinstance(self, PagesAPIEndpoint) and self.get_serializer_context().get('show_details', False):\n fields.insert(2, 'parent')\n\n return get_serializer_class(model, fields, base=self.base_serializer_class)\n\n def get_serializer_context(self):\n \"\"\"\n The serialization context differs between listing and detail views.\n \"\"\"\n request = self.request\n\n if self.action == 'listing_view':\n return {\n 'request': request,\n 'view': self,\n }\n\n return {\n 'request': request,\n 'view': self,\n 'show_details': True\n }\n\n def get_renderer_context(self):\n context = super(BaseAPIEndpoint, self).get_renderer_context()\n context['endpoints'] = [\n PagesAPIEndpoint,\n ImagesAPIEndpoint,\n DocumentsAPIEndpoint\n ]\n return context\n\n @classmethod\n def get_urlpatterns(cls):\n \"\"\"\n This returns a list of URL patterns for the endpoint\n \"\"\"\n return [\n url(r'^$', cls.as_view({'get': 'listing_view'}), name='listing'),\n url(r'^(?P<pk>\\d+)/$', cls.as_view({'get': 'detail_view'}), name='detail'),\n ]\n\n @classmethod\n def has_model(cls, model):\n return NotImplemented\n\n\nclass PagesAPIEndpoint(BaseAPIEndpoint):\n base_serializer_class = PageSerializer\n filter_backends = [\n FieldsFilter,\n ChildOfFilter,\n DescendantOfFilter,\n OrderingFilter,\n SearchFilter\n ]\n known_query_parameters = BaseAPIEndpoint.known_query_parameters.union([\n 'type',\n 'child_of',\n 'descendant_of',\n ])\n extra_api_fields = ['title']\n name = 'pages'\n\n def get_queryset(self):\n request = self.request\n\n # Allow pages to be filtered to a specific type\n if 'type' not in request.GET:\n model = Page\n else:\n model_name = request.GET['type']\n 
try:\n model = resolve_model_string(model_name)\n except LookupError:\n raise BadRequestError(\"type doesn't exist\")\n if not issubclass(model, Page):\n raise BadRequestError(\"type doesn't exist\")\n\n # Get live pages that are not in a private section\n queryset = model.objects.public().live()\n\n # Filter by site\n queryset = queryset.descendant_of(request.site.root_page, inclusive=True)\n\n return queryset\n\n def get_object(self):\n base = super(PagesAPIEndpoint, self).get_object()\n return base.specific\n\n @classmethod\n def has_model(cls, model):\n return issubclass(model, Page)\n\n\nclass ImagesAPIEndpoint(BaseAPIEndpoint):\n queryset = get_image_model().objects.all().order_by('id')\n base_serializer_class = ImageSerializer\n filter_backends = [FieldsFilter, OrderingFilter, SearchFilter]\n extra_api_fields = ['title', 'tags', 'width', 'height']\n name = 'images'\n\n @classmethod\n def has_model(cls, model):\n return model == get_image_model()\n\n\nclass DocumentsAPIEndpoint(BaseAPIEndpoint):\n queryset = Document.objects.all().order_by('id')\n base_serializer_class = DocumentSerializer\n filter_backends = [FieldsFilter, OrderingFilter, SearchFilter]\n extra_api_fields = ['title', 'tags']\n name = 'documents'\n\n @classmethod\n def has_model(cls, model):\n return model == Document\n", "path": "wagtail/contrib/wagtailapi/endpoints.py"}]} | 3,081 | 125 |
gh_patches_debug_28345 | rasdani/github-patches | git_diff | webkom__lego-1292 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make the contact form send to the HS mailing list instead of to individual members
The form currently fetches all members of the HS group and sends the message to each individual address. This makes it cumbersome for HS to discuss the message; it would be better to send it to their mailing list instead.
</issue>
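A sketch of the direction the issue suggests, replacing the per-member fan-out with a single list address; the `CONTACT_HS_EMAIL` setting is an assumption introduced here, not something the project necessarily defines:

```python
from django.conf import settings

from lego.utils.tasks import send_email


def send_message(title, message, user, anonymous):
    anonymous = anonymous if user.is_authenticated else True
    from_name = 'Anonymous' if anonymous else user.full_name
    from_email = 'Unknown' if anonymous else user.email_address

    # hypothetical setting that holds the Hovedstyret mailing list address
    recipient = settings.CONTACT_HS_EMAIL

    send_email.delay(
        to_email=[recipient],
        context={
            'title': title,
            'message': message,
            'from_name': from_name,
            'from_email': from_email,
        },
        subject='Ny henvendelse fra kontaktskjemaet',
        plain_template='contact/email/contact_form.txt',
        html_template='contact/email/contact_form.html',
        from_email=None,
    )
```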
<code>
[start of lego/apps/users/fixtures/initial_abakus_groups.py]
1 from lego.apps.users.constants import GROUP_COMMITTEE, GROUP_GRADE
2 from lego.apps.users.models import AbakusGroup
3 from lego.utils.functions import insert_abakus_groups
4
5 # isort:skip
6 """
7 The structure of the tree is key and a list of two dicts.
8 The first dict is the parameters of the current group
9 and the second dict are the children of the current group.
10
11 E.g. Abakus: [
12 {
13 description: 'ABAKUSGRUPPE',
14 permissions: ['/sudo/...']
15 ...
16 },
17 {
18 'Webkom': [{
19 description: 'WEBKOMGRUPPE',
20 permissions: ['/sudo/']
21 ...
22 }, {}]
23 }
24 ]
25 """
26
27 initial_tree = {
28 'Users': [{
29 'description': 'Brukere på Abakus.no'
30 }, {}],
31 'Abakus': [
32 {
33 'description':
34 'Medlemmer av Abakus',
35 'permissions': [
36 '/sudo/admin/meetings/create', '/sudo/admin/meetinginvitations/create',
37 '/sudo/admin/registrations/create/', '/sudo/admin/events/payment/',
38 '/sudo/admin/comments/create'
39 ]
40 },
41 {
42 'Abakom': [
43 {
44 'description':
45 'Medlemmer av Abakom',
46 'permissions': [
47 '/sudo/admin/events/',
48 '/sudo/admin/pools/',
49 '/sudo/admin/registrations/',
50 '/sudo/admin/companies/',
51 '/sudo/admin/joblistings/',
52 ]
53 },
54 {
55 'Arrkom': [{
56 'type': GROUP_COMMITTEE,
57 'logo_id': 'abakus_arrkom.png'
58 }, {}],
59 'backup': [{
60 'type': GROUP_COMMITTEE,
61 'logo_id': 'abakus_backup.png'
62 }, {}],
63 'Bedkom': [
64 {
65 'type':
66 GROUP_COMMITTEE,
67 'logo_id':
68 'abakus_bedkom.png',
69 'permissions': [
70 '/sudo/admin/companyinterest/', '/sudo/admin/surveys/',
71 '/sudo/admin/submissions/'
72 ]
73 }, {}
74 ],
75 'Fagkom': [
76 {
77 'type':
78 GROUP_COMMITTEE,
79 'logo_id':
80 'abakus_fagkom.png',
81 'permissions': [
82 '/sudo/admin/companyinterest/', '/sudo/admin/surveys/',
83 '/sudo/admin/submissions/'
84 ]
85 }, {}
86 ],
87 'Koskom': [{
88 'type': GROUP_COMMITTEE,
89 'logo_id': 'abakus_koskom.png'
90 }, {}],
91 'LaBamba': [{
92 'type': GROUP_COMMITTEE,
93 'logo_id': 'abakus_labamba.png'
94 }, {}],
95 'PR': [{
96 'type': GROUP_COMMITTEE,
97 'logo_id': 'abakus_pr.png'
98 }, {}],
99 'readme': [{
100 'type': GROUP_COMMITTEE,
101 'logo_id': 'abakus_readme.png'
102 }, {}],
103 'Webkom': [
104 {
105 'type': GROUP_COMMITTEE,
106 'logo_id': 'abakus_webkom.png',
107 'permissions': ['/sudo/'],
108 'text': 'hei'
109 }, {}
110 ],
111 'Hovedstyret':
112 [{
113 'logo_id': 'abakus_hs.png',
114 'permissions': ['/sudo/admin/'],
115 }, {}]
116 }
117 ],
118 'Interessegrupper':
119 [{
120 'description': 'Super-gruppe for alle interessegrupper i Abakus'
121 }, {}]
122 }
123 ],
124 'Students': [
125 {},
126 {
127 'Datateknologi': [
128 {},
129 {
130 '1. klasse Datateknologi': [{
131 'type': GROUP_GRADE
132 }, {}],
133 '2. klasse Datateknologi': [{
134 'type': GROUP_GRADE
135 }, {}],
136 '3. klasse Datateknologi': [{
137 'type': GROUP_GRADE
138 }, {}],
139 '4. klasse Datateknologi': [{
140 'type': GROUP_GRADE
141 }, {}],
142 '5. klasse Datateknologi': [{
143 'type': GROUP_GRADE
144 }, {}],
145 }
146 ],
147 'Kommunikasjonsteknologi': [
148 {},
149 {
150 '1. klasse Kommunikasjonsteknologi': [{
151 'type': GROUP_GRADE
152 }, {}],
153 '2. klasse Kommunikasjonsteknologi': [{
154 'type': GROUP_GRADE
155 }, {}],
156 '3. klasse Kommunikasjonsteknologi': [{
157 'type': GROUP_GRADE
158 }, {}],
159 '4. klasse Kommunikasjonsteknologi': [{
160 'type': GROUP_GRADE
161 }, {}],
162 '5. klasse Kommunikasjonsteknologi': [{
163 'type': GROUP_GRADE
164 }, {}],
165 }
166 ]
167 }
168 ]
169 }
170
171
172 def load_abakus_groups():
173 insert_abakus_groups(initial_tree)
174 AbakusGroup.objects.rebuild()
175
[end of lego/apps/users/fixtures/initial_abakus_groups.py]
[start of lego/apps/contact/send.py]
1 from lego.apps.users.models import AbakusGroup
2 from lego.utils.tasks import send_email
3
4
5 def send_message(title, message, user, anonymous):
6 """
7 Send a message to HS when users posts to the contact form.
8 Don't catch AbakusGroup.DoesNotExist, this notifies us when the group doesn't exist.
9 """
10 anonymous = anonymous if user.is_authenticated else True
11 abakus_group = AbakusGroup.objects.get(name='Hovedstyret')
12 users = [membership.user for membership in abakus_group.memberships.select_related('user')]
13 emails = [user.email_address for user in users]
14
15 from_name = 'Anonymous' if anonymous else user.full_name
16 from_email = 'Unknown' if anonymous else user.email_address
17
18 send_email.delay(
19 to_email=emails, context={
20 'title': title,
21 'message': message,
22 'from_name': from_name,
23 'from_email': from_email
24 }, subject='Ny henvendelse fra kontaktskjemaet',
25 plain_template='contact/email/contact_form.txt',
26 html_template='contact/email/contact_form.html', from_email=None
27 )
28
[end of lego/apps/contact/send.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lego/apps/contact/send.py b/lego/apps/contact/send.py
--- a/lego/apps/contact/send.py
+++ b/lego/apps/contact/send.py
@@ -9,14 +9,12 @@
"""
anonymous = anonymous if user.is_authenticated else True
abakus_group = AbakusGroup.objects.get(name='Hovedstyret')
- users = [membership.user for membership in abakus_group.memberships.select_related('user')]
- emails = [user.email_address for user in users]
from_name = 'Anonymous' if anonymous else user.full_name
from_email = 'Unknown' if anonymous else user.email_address
send_email.delay(
- to_email=emails, context={
+ to_email=abakus_group.contact_email, context={
'title': title,
'message': message,
'from_name': from_name,
diff --git a/lego/apps/users/fixtures/initial_abakus_groups.py b/lego/apps/users/fixtures/initial_abakus_groups.py
--- a/lego/apps/users/fixtures/initial_abakus_groups.py
+++ b/lego/apps/users/fixtures/initial_abakus_groups.py
@@ -108,11 +108,13 @@
'text': 'hei'
}, {}
],
- 'Hovedstyret':
- [{
- 'logo_id': 'abakus_hs.png',
- 'permissions': ['/sudo/admin/'],
- }, {}]
+ 'Hovedstyret': [
+ {
+ 'logo_id': 'abakus_hs.png',
+ 'permissions': ['/sudo/admin/'],
+ 'contact_email': "[email protected]"
+ }, {}
+ ]
}
],
'Interessegrupper':
| {"golden_diff": "diff --git a/lego/apps/contact/send.py b/lego/apps/contact/send.py\n--- a/lego/apps/contact/send.py\n+++ b/lego/apps/contact/send.py\n@@ -9,14 +9,12 @@\n \"\"\"\n anonymous = anonymous if user.is_authenticated else True\n abakus_group = AbakusGroup.objects.get(name='Hovedstyret')\n- users = [membership.user for membership in abakus_group.memberships.select_related('user')]\n- emails = [user.email_address for user in users]\n \n from_name = 'Anonymous' if anonymous else user.full_name\n from_email = 'Unknown' if anonymous else user.email_address\n \n send_email.delay(\n- to_email=emails, context={\n+ to_email=abakus_group.contact_email, context={\n 'title': title,\n 'message': message,\n 'from_name': from_name,\ndiff --git a/lego/apps/users/fixtures/initial_abakus_groups.py b/lego/apps/users/fixtures/initial_abakus_groups.py\n--- a/lego/apps/users/fixtures/initial_abakus_groups.py\n+++ b/lego/apps/users/fixtures/initial_abakus_groups.py\n@@ -108,11 +108,13 @@\n 'text': 'hei'\n }, {}\n ],\n- 'Hovedstyret':\n- [{\n- 'logo_id': 'abakus_hs.png',\n- 'permissions': ['/sudo/admin/'],\n- }, {}]\n+ 'Hovedstyret': [\n+ {\n+ 'logo_id': 'abakus_hs.png',\n+ 'permissions': ['/sudo/admin/'],\n+ 'contact_email': \"[email protected]\"\n+ }, {}\n+ ]\n }\n ],\n 'Interessegrupper':\n", "issue": "Make contact form send to mail list for HS, instead of individual members\nThe form currently fetches all members of the HS group, and sends the message to each individual address. This makes it cumbersome to discuss the message for HS, it would be better to send it to their mailing list instead.\n", "before_files": [{"content": "from lego.apps.users.constants import GROUP_COMMITTEE, GROUP_GRADE\nfrom lego.apps.users.models import AbakusGroup\nfrom lego.utils.functions import insert_abakus_groups\n\n# isort:skip\n\"\"\"\nThe structure of the tree is key and a list of two dicts.\nThe first dict is the parameters of the current group\nand the second dict are the children of the current group.\n\nE.g. 
Abakus: [\n {\n description: 'ABAKUSGRUPPE',\n permissions: ['/sudo/...']\n ...\n },\n {\n 'Webkom': [{\n description: 'WEBKOMGRUPPE',\n permissions: ['/sudo/']\n ...\n }, {}]\n }\n]\n\"\"\"\n\ninitial_tree = {\n 'Users': [{\n 'description': 'Brukere p\u00e5 Abakus.no'\n }, {}],\n 'Abakus': [\n {\n 'description':\n 'Medlemmer av Abakus',\n 'permissions': [\n '/sudo/admin/meetings/create', '/sudo/admin/meetinginvitations/create',\n '/sudo/admin/registrations/create/', '/sudo/admin/events/payment/',\n '/sudo/admin/comments/create'\n ]\n },\n {\n 'Abakom': [\n {\n 'description':\n 'Medlemmer av Abakom',\n 'permissions': [\n '/sudo/admin/events/',\n '/sudo/admin/pools/',\n '/sudo/admin/registrations/',\n '/sudo/admin/companies/',\n '/sudo/admin/joblistings/',\n ]\n },\n {\n 'Arrkom': [{\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_arrkom.png'\n }, {}],\n 'backup': [{\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_backup.png'\n }, {}],\n 'Bedkom': [\n {\n 'type':\n GROUP_COMMITTEE,\n 'logo_id':\n 'abakus_bedkom.png',\n 'permissions': [\n '/sudo/admin/companyinterest/', '/sudo/admin/surveys/',\n '/sudo/admin/submissions/'\n ]\n }, {}\n ],\n 'Fagkom': [\n {\n 'type':\n GROUP_COMMITTEE,\n 'logo_id':\n 'abakus_fagkom.png',\n 'permissions': [\n '/sudo/admin/companyinterest/', '/sudo/admin/surveys/',\n '/sudo/admin/submissions/'\n ]\n }, {}\n ],\n 'Koskom': [{\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_koskom.png'\n }, {}],\n 'LaBamba': [{\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_labamba.png'\n }, {}],\n 'PR': [{\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_pr.png'\n }, {}],\n 'readme': [{\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_readme.png'\n }, {}],\n 'Webkom': [\n {\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_webkom.png',\n 'permissions': ['/sudo/'],\n 'text': 'hei'\n }, {}\n ],\n 'Hovedstyret':\n [{\n 'logo_id': 'abakus_hs.png',\n 'permissions': ['/sudo/admin/'],\n }, {}]\n }\n ],\n 'Interessegrupper':\n [{\n 'description': 'Super-gruppe for alle interessegrupper i Abakus'\n }, {}]\n }\n ],\n 'Students': [\n {},\n {\n 'Datateknologi': [\n {},\n {\n '1. klasse Datateknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '2. klasse Datateknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '3. klasse Datateknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '4. klasse Datateknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '5. klasse Datateknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n }\n ],\n 'Kommunikasjonsteknologi': [\n {},\n {\n '1. klasse Kommunikasjonsteknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '2. klasse Kommunikasjonsteknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '3. klasse Kommunikasjonsteknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '4. klasse Kommunikasjonsteknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '5. 
klasse Kommunikasjonsteknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n }\n ]\n }\n ]\n}\n\n\ndef load_abakus_groups():\n insert_abakus_groups(initial_tree)\n AbakusGroup.objects.rebuild()\n", "path": "lego/apps/users/fixtures/initial_abakus_groups.py"}, {"content": "from lego.apps.users.models import AbakusGroup\nfrom lego.utils.tasks import send_email\n\n\ndef send_message(title, message, user, anonymous):\n \"\"\"\n Send a message to HS when users posts to the contact form.\n Don't catch AbakusGroup.DoesNotExist, this notifies us when the group doesn't exist.\n \"\"\"\n anonymous = anonymous if user.is_authenticated else True\n abakus_group = AbakusGroup.objects.get(name='Hovedstyret')\n users = [membership.user for membership in abakus_group.memberships.select_related('user')]\n emails = [user.email_address for user in users]\n\n from_name = 'Anonymous' if anonymous else user.full_name\n from_email = 'Unknown' if anonymous else user.email_address\n\n send_email.delay(\n to_email=emails, context={\n 'title': title,\n 'message': message,\n 'from_name': from_name,\n 'from_email': from_email\n }, subject='Ny henvendelse fra kontaktskjemaet',\n plain_template='contact/email/contact_form.txt',\n html_template='contact/email/contact_form.html', from_email=None\n )\n", "path": "lego/apps/contact/send.py"}]} | 2,436 | 393 |
gh_patches_debug_17230 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-2988 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Parsing and usage of boolean environment variables doesn't work as expected
We cannot specify "falsy" values via environment variables, as they will be overridden by `default` in `get_env`.
Also some integrations (e.g. `botocore` for setting `distributed_tracing`) don't parse the result of `get_env` correctly, and thus cannot be changed via environment variables.
I would like to set `config.botocore["distributed_tracing"]` to `False` via the `DD_BOTOCORE_DISTRIBUTED_TRACING` environment variable, but because the default is `True`, there is no way to change the setting to disable the behaviour.
There are two issues, and if either of them was fixed, the problem would be fixed.
I would be happy to provide a fix for both of them, or be directed to a way to fix it, without having to add extra code to my project.
## Empty environment variables are assumed to be missing
If you do `DD_BOTOCORE_DISTRIBUTED_TRACING=` (an empty string), [the relevant code](https://github.com/DataDog/dd-trace-py/blob/e7a73336ab7c4c1479d504f86730c4dfaeb2e5f7/ddtrace/utils/formats.py#L47-L58) will replace it with `default`, in which case it would be `True`:
```python
def get_env(*parts, **kwargs):
...
# env = "DD_BOTOCORE_DISTRIBUTED_TRACING"
value = os.getenv(env) # value = ""
...
# legacy = None
value = value or legacy # value = None
# default = True
return value if value else default # return True
```
The fix would be to check for `None` instead of checking for falsiness:
```python
if value is None:
if legacy is not None:
value = legacy
else:
value = default
```
## Usage of `get_env` for booleans should convert text values to boolean
When the `botocore` integration [uses](https://github.com/DataDog/dd-trace-py/blob/e7a73336ab7c4c1479d504f86730c4dfaeb2e5f7/ddtrace/contrib/botocore/patch.py#L39) `get_env` to define a boolean option, it should convert common boolean text values to boolean, and reject others:
```python
config._add(
"botocore",
{
"distributed_tracing": get_bool_env("botocore", "distributed_tracing", default=True),
...,
},
)
```
And
```python
TRUE_STRINGS = [True, "true", "yes", "y", "enable", "enabled", "1"]
FALSE_STRINGS = [False, "false", "no", "n", "disable", "disabled", "0"]
def to_boolean(value: str) -> bool:
if value.lower() in TRUE_STRINGS:
return True
if value.lower() in FALSE_STRINGS:
return False
raise ValueError(f"Unknown boolean value '{value}'")
def get_bool_env(*parts, **kwargs):
return to_boolean(get_env(*parts, **kwargs))
```
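For instance, assuming the `to_boolean`/`get_bool_env` helpers above are in scope (they are part of this proposal, not an existing ddtrace API), common textual values would then round-trip correctly:

```python
import os

# Exercising the proposed helpers; the env var name matches the one
# built by get_env("botocore", "distributed_tracing").
os.environ["DD_BOTOCORE_DISTRIBUTED_TRACING"] = "false"
assert get_bool_env("botocore", "distributed_tracing", default=True) is False

os.environ["DD_BOTOCORE_DISTRIBUTED_TRACING"] = "yes"
assert get_bool_env("botocore", "distributed_tracing", default=True) is True
```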
### Which version of dd-trace-py are you using?
`ddtrace==0.55.3`
### Which version of pip are you using?
`pip 21.2.4 (python 3.8)`
### Which version of the libraries are you using?
Not relevant
### How can we reproduce your problem?
First issue:
Before running the script:
```shell
DD_BOTOCORE_DISTRIBUTED_TRACING=
```
Run the script:
```python3
from ddtrace import config
from ddtrace.contrib import botocore
print(config.botocore["distributed_tracing"])
print(bool(config.botocore["distributed_tracing"]))
```
Outputs:
```
True
True
```
Second issue:
Before running the script:
```shell
DD_BOTOCORE_DISTRIBUTED_TRACING=False
```
Run the script:
```python3
from ddtrace import config
from ddtrace.contrib import botocore
print(config.botocore["distributed_tracing"])
print(bool(config.botocore["distributed_tracing"]))
```
Outputs:
```
False
True
```
### What is the result that you get?
I cannot meaningfully change the value of `config.botocore["distributed_tracing"]`
### What is the result that you expected?
I want to be able to meaningfully change `config.botocore["distributed_tracing"]` via the environment variable (i.e. `DD_BOTOCORE_DISTRIBUTED_TRACING`)
</issue>
<code>
[start of ddtrace/contrib/botocore/patch.py]
1 """
2 Trace queries to aws api done via botocore client
3 """
4 # 3p
5 import base64
6 import json
7
8 import botocore.client
9
10 from ddtrace import config
11 from ddtrace.vendor import wrapt
12
13 # project
14 from ...constants import ANALYTICS_SAMPLE_RATE_KEY
15 from ...constants import SPAN_MEASURED_KEY
16 from ...ext import SpanTypes
17 from ...ext import aws
18 from ...ext import http
19 from ...internal.logger import get_logger
20 from ...pin import Pin
21 from ...propagation.http import HTTPPropagator
22 from ...utils import get_argument_value
23 from ...utils.formats import deep_getattr
24 from ...utils.formats import get_env
25 from ...utils.wrappers import unwrap
26
27
28 # Original botocore client class
29 _Botocore_client = botocore.client.BaseClient
30
31 ARGS_NAME = ("action", "params", "path", "verb")
32 TRACED_ARGS = {"params", "path", "verb"}
33
34 log = get_logger(__name__)
35
36 # Botocore default settings
37 config._add(
38 "botocore",
39 {
40 "distributed_tracing": get_env("botocore", "distributed_tracing", default=True),
41 "invoke_with_legacy_context": get_env("botocore", "invoke_with_legacy_context", default=False),
42 },
43 )
44
45
46 def inject_trace_data_to_message_attributes(trace_data, entry):
47 if "MessageAttributes" not in entry:
48 entry["MessageAttributes"] = {}
49 # An Amazon SQS message can contain up to 10 metadata attributes.
50 if len(entry["MessageAttributes"]) < 10:
51 entry["MessageAttributes"]["_datadog"] = {"DataType": "String", "StringValue": json.dumps(trace_data)}
52 else:
53 log.debug("skipping trace injection, max number (10) of MessageAttributes exceeded")
54
55
56 def inject_trace_to_sqs_batch_message(args, span):
57 trace_data = {}
58 HTTPPropagator.inject(span.context, trace_data)
59 params = args[1]
60
61 for entry in params["Entries"]:
62 inject_trace_data_to_message_attributes(trace_data, entry)
63
64
65 def inject_trace_to_sqs_message(args, span):
66 trace_data = {}
67 HTTPPropagator.inject(span.context, trace_data)
68 params = args[1]
69
70 inject_trace_data_to_message_attributes(trace_data, params)
71
72
73 def modify_client_context(client_context_object, trace_headers):
74 if config.botocore["invoke_with_legacy_context"]:
75 trace_headers = {"_datadog": trace_headers}
76
77 if "custom" in client_context_object:
78 client_context_object["custom"].update(trace_headers)
79 else:
80 client_context_object["custom"] = trace_headers
81
82
83 def inject_trace_to_client_context(args, span):
84 trace_headers = {}
85 HTTPPropagator.inject(span.context, trace_headers)
86 client_context_object = {}
87 params = args[1]
88 if "ClientContext" in params:
89 try:
90 client_context_json = base64.b64decode(params["ClientContext"]).decode("utf-8")
91 client_context_object = json.loads(client_context_json)
92 except Exception:
93 log.warning("malformed client_context=%s", params["ClientContext"], exc_info=True)
94 return
95 modify_client_context(client_context_object, trace_headers)
96 try:
97 json_context = json.dumps(client_context_object).encode("utf-8")
98 except Exception:
99 log.warning("unable to encode modified client context as json: %s", client_context_object, exc_info=True)
100 return
101 params["ClientContext"] = base64.b64encode(json_context).decode("utf-8")
102
103
104 def patch():
105 if getattr(botocore.client, "_datadog_patch", False):
106 return
107 setattr(botocore.client, "_datadog_patch", True)
108
109 wrapt.wrap_function_wrapper("botocore.client", "BaseClient._make_api_call", patched_api_call)
110 Pin(service="aws", app="aws").onto(botocore.client.BaseClient)
111
112
113 def unpatch():
114 if getattr(botocore.client, "_datadog_patch", False):
115 setattr(botocore.client, "_datadog_patch", False)
116 unwrap(botocore.client.BaseClient, "_make_api_call")
117
118
119 def patched_api_call(original_func, instance, args, kwargs):
120
121 pin = Pin.get_from(instance)
122 if not pin or not pin.enabled():
123 return original_func(*args, **kwargs)
124
125 endpoint_name = deep_getattr(instance, "_endpoint._endpoint_prefix")
126
127 with pin.tracer.trace(
128 "{}.command".format(endpoint_name), service="{}.{}".format(pin.service, endpoint_name), span_type=SpanTypes.HTTP
129 ) as span:
130 span.set_tag(SPAN_MEASURED_KEY)
131 operation = None
132 if args:
133 operation = get_argument_value(args, kwargs, 0, "operation_name")
134 # DEV: join is the fastest way of concatenating strings that is compatible
135 # across Python versions (see
136 # https://stackoverflow.com/questions/1316887/what-is-the-most-efficient-string-concatenation-method-in-python)
137 span.resource = ".".join((endpoint_name, operation.lower()))
138
139 if config.botocore["distributed_tracing"]:
140 if endpoint_name == "lambda" and operation == "Invoke":
141 inject_trace_to_client_context(args, span)
142 if endpoint_name == "sqs" and operation == "SendMessage":
143 inject_trace_to_sqs_message(args, span)
144 if endpoint_name == "sqs" and operation == "SendMessageBatch":
145 inject_trace_to_sqs_batch_message(args, span)
146
147 else:
148 span.resource = endpoint_name
149
150 aws.add_span_arg_tags(span, endpoint_name, args, ARGS_NAME, TRACED_ARGS)
151
152 region_name = deep_getattr(instance, "meta.region_name")
153
154 span._set_str_tag("aws.agent", "botocore")
155 if operation is not None:
156 span._set_str_tag("aws.operation", operation)
157 if region_name is not None:
158 span._set_str_tag("aws.region", region_name)
159
160 result = original_func(*args, **kwargs)
161
162 response_meta = result.get("ResponseMetadata")
163 if response_meta:
164 if "HTTPStatusCode" in response_meta:
165 span.set_tag(http.STATUS_CODE, response_meta["HTTPStatusCode"])
166
167 if "RetryAttempts" in response_meta:
168 span.set_tag("retry_attempts", response_meta["RetryAttempts"])
169
170 if "RequestId" in response_meta:
171 span.set_tag("aws.requestid", response_meta["RequestId"])
172
173 # set analytics sample rate
174 span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.botocore.get_analytics_sample_rate())
175
176 return result
177
[end of ddtrace/contrib/botocore/patch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddtrace/contrib/botocore/patch.py b/ddtrace/contrib/botocore/patch.py
--- a/ddtrace/contrib/botocore/patch.py
+++ b/ddtrace/contrib/botocore/patch.py
@@ -20,6 +20,7 @@
from ...pin import Pin
from ...propagation.http import HTTPPropagator
from ...utils import get_argument_value
+from ...utils.formats import asbool
from ...utils.formats import deep_getattr
from ...utils.formats import get_env
from ...utils.wrappers import unwrap
@@ -37,8 +38,8 @@
config._add(
"botocore",
{
- "distributed_tracing": get_env("botocore", "distributed_tracing", default=True),
- "invoke_with_legacy_context": get_env("botocore", "invoke_with_legacy_context", default=False),
+ "distributed_tracing": asbool(get_env("botocore", "distributed_tracing", default=True)),
+ "invoke_with_legacy_context": asbool(get_env("botocore", "invoke_with_legacy_context", default=False)),
},
)
| {"golden_diff": "diff --git a/ddtrace/contrib/botocore/patch.py b/ddtrace/contrib/botocore/patch.py\n--- a/ddtrace/contrib/botocore/patch.py\n+++ b/ddtrace/contrib/botocore/patch.py\n@@ -20,6 +20,7 @@\n from ...pin import Pin\n from ...propagation.http import HTTPPropagator\n from ...utils import get_argument_value\n+from ...utils.formats import asbool\n from ...utils.formats import deep_getattr\n from ...utils.formats import get_env\n from ...utils.wrappers import unwrap\n@@ -37,8 +38,8 @@\n config._add(\n \"botocore\",\n {\n- \"distributed_tracing\": get_env(\"botocore\", \"distributed_tracing\", default=True),\n- \"invoke_with_legacy_context\": get_env(\"botocore\", \"invoke_with_legacy_context\", default=False),\n+ \"distributed_tracing\": asbool(get_env(\"botocore\", \"distributed_tracing\", default=True)),\n+ \"invoke_with_legacy_context\": asbool(get_env(\"botocore\", \"invoke_with_legacy_context\", default=False)),\n },\n )\n", "issue": "Parsing and usage of boolean environment variables doesn't work as expected\nWe cannot specify \"falsy\" values via environment variables, as they will be overridden by `default` in `get_env`.\r\n\r\nAlso some integrations (e.g. `botocore` for setting `distributed_tracing`) don't parse the result of `get_env` correctly, and thus cannot be changed via environment variables.\r\n\r\nI would like to set `config.botocore[\"distributing_tracing\"]` to `False` via the `DD_BOTOCORE_DISTRIBUTED_TRACING` environment variable, but because the default is `True`, there is no way to change the semantics of the setting to disable the behaviour.\r\n\r\nThere are two issues, and if either of them was fixed, the problem would be fixed.\r\n\r\nI would be happy to provide a fix for both of them, or be directed to a way to fix it, without having to add extra code to my project.\r\n\r\n## Empty environment variables are assumed to be missing\r\n\r\nIf you do `DD_BOTOCORE_DISTRIBUTED_TRACING=` (an empty string), [the relevant code](https://github.com/DataDog/dd-trace-py/blob/e7a73336ab7c4c1479d504f86730c4dfaeb2e5f7/ddtrace/utils/formats.py#L47-L58) will replace it with `default`, in which case it would be `True`:\r\n\r\n```python\r\ndef get_env(*parts, **kwargs):\r\n ...\r\n\r\n # env = \"DD_BOTOCORE_DISTRIBUTED_TRACING\"\r\n value = os.getenv(env) # value = \"\"\r\n ...\r\n\r\n # legacy = None\r\n value = value or legacy # value = None\r\n # default = True\r\n return value if value else default # return True\r\n```\r\n\r\nThe fix would be to check for `None`, instead for \"falsy\":\r\n\r\n```python\r\nif value is None:\r\n if legacy is not None:\r\n value = legacy\r\n else:\r\n value = default\r\n```\r\n\r\n## Usage of `get_env` for booleans should convert text values to boolean\r\n\r\nWhen the `botocore` integration [uses](https://github.com/DataDog/dd-trace-py/blob/e7a73336ab7c4c1479d504f86730c4dfaeb2e5f7/ddtrace/contrib/botocore/patch.py#L39) `get_env` to define a boolean option, it should convert common boolean text values to boolean, and reject others:\r\n\r\n```python\r\nconfig._add(\r\n \"botocore\",\r\n {\r\n \"distributed_tracing\": get_bool_env(\"botocore\", \"distributed_tracing\", default=True),\r\n ...,\r\n },\r\n)\r\n```\r\nAnd\r\n```python\r\nTRUE_STRINGS = [True, \"true\", \"yes\", \"y\", \"enable\", \"enabled\", \"1\"]\r\nFALSE_STRINGS = [False, \"false\", \"no\", \"n\", \"disable\", \"disabled\", \"0\"]\r\n\r\n\r\ndef to_boolean(value: str) -> bool:\r\n if value.lower() in TRUE_STRINGS:\r\n return True\r\n if value.lower() in FALSE_STRINGS:\r\n 
return False\r\n raise ValueError(f\"Unknown boolean value '{value}'\")\r\n\r\n\r\n\r\ndef get_bool_env(*parts, **kwargs):\r\n return to_boolean(get_env(*parts, **kwargs))\r\n```\r\n\r\n### Which version of dd-trace-py are you using?\r\n\r\n`ddtrace==0.55.3`\r\n\r\n### Which version of pip are you using?\r\n\r\n`pip 21.2.4 (python 3.8)`\r\n\r\n### Which version of the libraries are you using?\r\n\r\nNot relevant\r\n\r\n### How can we reproduce your problem?\r\n\r\nFirst issue:\r\n\r\nBefore running the script:\r\n\r\n```shell\r\nDD_BOTOCORE_DISTRIBUTED_TRACING=\r\n```\r\n\r\nRun the script:\r\n\r\n```python3\r\nfrom ddtrace import config\r\nfrom ddtrace.contrib import botocore\r\nprint(config.botocore[\"distributed_tracing\"])\r\nprint(bool(config.botocore[\"distributed_tracing\"]))\r\n```\r\n\r\nOutputs:\r\n\r\n```\r\nTrue\r\nTrue\r\n```\r\n\r\nSecond issue:\r\n\r\nBefore running the script:\r\n\r\n```shell\r\nDD_BOTOCORE_DISTRIBUTED_TRACING=False\r\n```\r\n\r\nRun the script:\r\n\r\n```python3\r\nfrom ddtrace import config\r\nfrom ddtrace.contrib import botocore\r\nprint(config.botocore[\"distributed_tracing\"])\r\nprint(bool(config.botocore[\"distributed_tracing\"]))\r\n```\r\n\r\nOutputs:\r\n\r\n```\r\nFalse\r\nTrue\r\n```\r\n\r\n\r\n### What is the result that you get?\r\n\r\nI cannot meaningfully change the value of `config.botocore[\"distributed_tracing\"]`\r\n\r\n### What is the result that you expected?\r\n\r\nI want to be able to meaningfully change `config.botocore[\"distributed_tracing\"]` via the environment variable (i.e. `DD_BOTOCORE_DISTRIBUTED_TRACING`)\r\n\n", "before_files": [{"content": "\"\"\"\nTrace queries to aws api done via botocore client\n\"\"\"\n# 3p\nimport base64\nimport json\n\nimport botocore.client\n\nfrom ddtrace import config\nfrom ddtrace.vendor import wrapt\n\n# project\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ...constants import SPAN_MEASURED_KEY\nfrom ...ext import SpanTypes\nfrom ...ext import aws\nfrom ...ext import http\nfrom ...internal.logger import get_logger\nfrom ...pin import Pin\nfrom ...propagation.http import HTTPPropagator\nfrom ...utils import get_argument_value\nfrom ...utils.formats import deep_getattr\nfrom ...utils.formats import get_env\nfrom ...utils.wrappers import unwrap\n\n\n# Original botocore client class\n_Botocore_client = botocore.client.BaseClient\n\nARGS_NAME = (\"action\", \"params\", \"path\", \"verb\")\nTRACED_ARGS = {\"params\", \"path\", \"verb\"}\n\nlog = get_logger(__name__)\n\n# Botocore default settings\nconfig._add(\n \"botocore\",\n {\n \"distributed_tracing\": get_env(\"botocore\", \"distributed_tracing\", default=True),\n \"invoke_with_legacy_context\": get_env(\"botocore\", \"invoke_with_legacy_context\", default=False),\n },\n)\n\n\ndef inject_trace_data_to_message_attributes(trace_data, entry):\n if \"MessageAttributes\" not in entry:\n entry[\"MessageAttributes\"] = {}\n # An Amazon SQS message can contain up to 10 metadata attributes.\n if len(entry[\"MessageAttributes\"]) < 10:\n entry[\"MessageAttributes\"][\"_datadog\"] = {\"DataType\": \"String\", \"StringValue\": json.dumps(trace_data)}\n else:\n log.debug(\"skipping trace injection, max number (10) of MessageAttributes exceeded\")\n\n\ndef inject_trace_to_sqs_batch_message(args, span):\n trace_data = {}\n HTTPPropagator.inject(span.context, trace_data)\n params = args[1]\n\n for entry in params[\"Entries\"]:\n inject_trace_data_to_message_attributes(trace_data, entry)\n\n\ndef inject_trace_to_sqs_message(args, span):\n trace_data = 
{}\n HTTPPropagator.inject(span.context, trace_data)\n params = args[1]\n\n inject_trace_data_to_message_attributes(trace_data, params)\n\n\ndef modify_client_context(client_context_object, trace_headers):\n if config.botocore[\"invoke_with_legacy_context\"]:\n trace_headers = {\"_datadog\": trace_headers}\n\n if \"custom\" in client_context_object:\n client_context_object[\"custom\"].update(trace_headers)\n else:\n client_context_object[\"custom\"] = trace_headers\n\n\ndef inject_trace_to_client_context(args, span):\n trace_headers = {}\n HTTPPropagator.inject(span.context, trace_headers)\n client_context_object = {}\n params = args[1]\n if \"ClientContext\" in params:\n try:\n client_context_json = base64.b64decode(params[\"ClientContext\"]).decode(\"utf-8\")\n client_context_object = json.loads(client_context_json)\n except Exception:\n log.warning(\"malformed client_context=%s\", params[\"ClientContext\"], exc_info=True)\n return\n modify_client_context(client_context_object, trace_headers)\n try:\n json_context = json.dumps(client_context_object).encode(\"utf-8\")\n except Exception:\n log.warning(\"unable to encode modified client context as json: %s\", client_context_object, exc_info=True)\n return\n params[\"ClientContext\"] = base64.b64encode(json_context).decode(\"utf-8\")\n\n\ndef patch():\n if getattr(botocore.client, \"_datadog_patch\", False):\n return\n setattr(botocore.client, \"_datadog_patch\", True)\n\n wrapt.wrap_function_wrapper(\"botocore.client\", \"BaseClient._make_api_call\", patched_api_call)\n Pin(service=\"aws\", app=\"aws\").onto(botocore.client.BaseClient)\n\n\ndef unpatch():\n if getattr(botocore.client, \"_datadog_patch\", False):\n setattr(botocore.client, \"_datadog_patch\", False)\n unwrap(botocore.client.BaseClient, \"_make_api_call\")\n\n\ndef patched_api_call(original_func, instance, args, kwargs):\n\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return original_func(*args, **kwargs)\n\n endpoint_name = deep_getattr(instance, \"_endpoint._endpoint_prefix\")\n\n with pin.tracer.trace(\n \"{}.command\".format(endpoint_name), service=\"{}.{}\".format(pin.service, endpoint_name), span_type=SpanTypes.HTTP\n ) as span:\n span.set_tag(SPAN_MEASURED_KEY)\n operation = None\n if args:\n operation = get_argument_value(args, kwargs, 0, \"operation_name\")\n # DEV: join is the fastest way of concatenating strings that is compatible\n # across Python versions (see\n # https://stackoverflow.com/questions/1316887/what-is-the-most-efficient-string-concatenation-method-in-python)\n span.resource = \".\".join((endpoint_name, operation.lower()))\n\n if config.botocore[\"distributed_tracing\"]:\n if endpoint_name == \"lambda\" and operation == \"Invoke\":\n inject_trace_to_client_context(args, span)\n if endpoint_name == \"sqs\" and operation == \"SendMessage\":\n inject_trace_to_sqs_message(args, span)\n if endpoint_name == \"sqs\" and operation == \"SendMessageBatch\":\n inject_trace_to_sqs_batch_message(args, span)\n\n else:\n span.resource = endpoint_name\n\n aws.add_span_arg_tags(span, endpoint_name, args, ARGS_NAME, TRACED_ARGS)\n\n region_name = deep_getattr(instance, \"meta.region_name\")\n\n span._set_str_tag(\"aws.agent\", \"botocore\")\n if operation is not None:\n span._set_str_tag(\"aws.operation\", operation)\n if region_name is not None:\n span._set_str_tag(\"aws.region\", region_name)\n\n result = original_func(*args, **kwargs)\n\n response_meta = result.get(\"ResponseMetadata\")\n if response_meta:\n if \"HTTPStatusCode\" in 
response_meta:\n span.set_tag(http.STATUS_CODE, response_meta[\"HTTPStatusCode\"])\n\n if \"RetryAttempts\" in response_meta:\n span.set_tag(\"retry_attempts\", response_meta[\"RetryAttempts\"])\n\n if \"RequestId\" in response_meta:\n span.set_tag(\"aws.requestid\", response_meta[\"RequestId\"])\n\n # set analytics sample rate\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.botocore.get_analytics_sample_rate())\n\n return result\n", "path": "ddtrace/contrib/botocore/patch.py"}]} | 3,466 | 253 |
gh_patches_debug_9074 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-630 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tests broken on Python < 3.8
```python
_______ ERROR collecting tests_async/transport/test_aiohttp_requests.py ________
ImportError while importing test module '/home/tseaver/projects/agendaless/Google/src/google-auth/tests_async/transport/test_aiohttp_requests.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/compat.py:14: in <module>
from unittest import IsolatedAsyncioTestCase, skipIf
E ImportError: cannot import name 'IsolatedAsyncioTestCase'
During handling of the above exception, another exception occurred:
/opt/Python-3.6.10/lib/python3.6/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests_async/transport/test_aiohttp_requests.py:16: in <module>
from aioresponses import aioresponses, core
.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/__init__.py:2: in <module>
from .core import CallbackResult, aioresponses
.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/core.py:23: in <module>
from .compat import (
.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/compat.py:29: in <module>
from asynctest import fail_on, skipIf
E ModuleNotFoundError: No module named 'asynctest'
```
This is due to https://github.com/pnuckowski/aioresponses/issues/172: on Python < 3.8, `aioresponses` cannot import `unittest.IsolatedAsyncioTestCase` and falls back to importing `asynctest`, which is not installed in the test environment.
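
A minimal sketch of a test-side workaround, assuming the fix is simply to provide the backport that `aioresponses` falls back to: add `asynctest` to the async test dependencies in `noxfile.py` (shown below) so the fallback import succeeds on Python 3.6/3.7.

```python
# noxfile.py (sketch): aioresponses imports asynctest on Python < 3.8,
# so install the backport alongside the other async test dependencies.
ASYNC_DEPENDENCIES = ["pytest-asyncio", "aioresponses", "asynctest"]
```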
</issue>
<code>
[start of noxfile.py]
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import nox
16
17 TEST_DEPENDENCIES = [
18 "flask",
19 "freezegun",
20 "mock",
21 "oauth2client",
22 "pyopenssl",
23 "pytest",
24 "pytest-cov",
25 "pytest-localserver",
26 "requests",
27 "urllib3",
28 "cryptography",
29 "responses",
30 "grpcio",
31 ]
32
33 ASYNC_DEPENDENCIES = ["pytest-asyncio", "aioresponses"]
34
35 BLACK_VERSION = "black==19.3b0"
36 BLACK_PATHS = [
37 "google",
38 "tests",
39 "tests_async",
40 "noxfile.py",
41 "setup.py",
42 "docs/conf.py",
43 ]
44
45
46 @nox.session(python="3.7")
47 def lint(session):
48 session.install("flake8", "flake8-import-order", "docutils", BLACK_VERSION)
49 session.install(".")
50 session.run("black", "--check", *BLACK_PATHS)
51 session.run(
52 "flake8",
53 "--import-order-style=google",
54 "--application-import-names=google,tests,system_tests",
55 "google",
56 "tests",
57 "tests_async",
58 )
59 session.run(
60 "python", "setup.py", "check", "--metadata", "--restructuredtext", "--strict"
61 )
62
63
64 @nox.session(python="3.6")
65 def blacken(session):
66 """Run black.
67
68 Format code to uniform standard.
69
70 This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
71 That run uses an image that doesn't have 3.6 installed. Before updating this
72 check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
73 """
74 session.install(BLACK_VERSION)
75 session.run("black", *BLACK_PATHS)
76
77
78 @nox.session(python=["3.6", "3.7", "3.8"])
79 def unit(session):
80 session.install(*TEST_DEPENDENCIES)
81 session.install(*(ASYNC_DEPENDENCIES))
82 session.install(".")
83 session.run(
84 "pytest",
85 "--cov=google.auth",
86 "--cov=google.oauth2",
87 "--cov=tests",
88 "tests",
89 "tests_async",
90 )
91
92
93 @nox.session(python=["2.7", "3.5"])
94 def unit_prev_versions(session):
95 session.install(*TEST_DEPENDENCIES)
96 session.install(".")
97 session.run(
98 "pytest", "--cov=google.auth", "--cov=google.oauth2", "--cov=tests", "tests"
99 )
100
101
102 @nox.session(python="3.7")
103 def cover(session):
104 session.install(*TEST_DEPENDENCIES)
105 session.install(*(ASYNC_DEPENDENCIES))
106 session.install(".")
107 session.run(
108 "pytest",
109 "--cov=google.auth",
110 "--cov=google.oauth2",
111 "--cov=tests",
112 "--cov=tests_async",
113 "--cov-report=",
114 "tests",
115 "tests_async",
116 )
117 session.run("coverage", "report", "--show-missing", "--fail-under=100")
118
119
120 @nox.session(python="3.7")
121 def docgen(session):
122 session.env["SPHINX_APIDOC_OPTIONS"] = "members,inherited-members,show-inheritance"
123 session.install(*TEST_DEPENDENCIES)
124 session.install("sphinx")
125 session.install(".")
126 session.run("rm", "-r", "docs/reference")
127 session.run(
128 "sphinx-apidoc",
129 "--output-dir",
130 "docs/reference",
131 "--separate",
132 "--module-first",
133 "google",
134 )
135
136
137 @nox.session(python="3.7")
138 def docs(session):
139 session.install("sphinx", "-r", "docs/requirements-docs.txt")
140 session.install(".")
141 session.run("make", "-C", "docs", "html")
142
143
144 @nox.session(python="pypy")
145 def pypy(session):
146 session.install(*TEST_DEPENDENCIES)
147 session.install(".")
148 session.run(
149 "pytest",
150 "--cov=google.auth",
151 "--cov=google.oauth2",
152 "--cov=tests",
153 "tests",
154 "tests_async",
155 )
156
[end of noxfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -30,7 +30,7 @@
"grpcio",
]
-ASYNC_DEPENDENCIES = ["pytest-asyncio", "aioresponses"]
+ASYNC_DEPENDENCIES = ["pytest-asyncio", "aioresponses", "asynctest"]
BLACK_VERSION = "black==19.3b0"
BLACK_PATHS = [
@@ -144,6 +144,7 @@
@nox.session(python="pypy")
def pypy(session):
session.install(*TEST_DEPENDENCIES)
+ session.install(*ASYNC_DEPENDENCIES)
session.install(".")
session.run(
"pytest",
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -30,7 +30,7 @@\n \"grpcio\",\n ]\n \n-ASYNC_DEPENDENCIES = [\"pytest-asyncio\", \"aioresponses\"]\n+ASYNC_DEPENDENCIES = [\"pytest-asyncio\", \"aioresponses\", \"asynctest\"]\n \n BLACK_VERSION = \"black==19.3b0\"\n BLACK_PATHS = [\n@@ -144,6 +144,7 @@\n @nox.session(python=\"pypy\")\n def pypy(session):\n session.install(*TEST_DEPENDENCIES)\n+ session.install(*ASYNC_DEPENDENCIES)\n session.install(\".\")\n session.run(\n \"pytest\",\n", "issue": "Tests broken on Python < 3.8\n```python\r\n_______ ERROR collecting tests_async/transport/test_aiohttp_requests.py ________\r\nImportError while importing test module '/home/tseaver/projects/agendaless/Google/src/google-auth/tests_async/transport/test_aiohttp_requests.py'.\r\nHint: make sure your test modules/packages have valid Python names.\r\nTraceback:\r\n.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/compat.py:14: in <module>\r\n from unittest import IsolatedAsyncioTestCase, skipIf\r\nE ImportError: cannot import name 'IsolatedAsyncioTestCase'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n/opt/Python-3.6.10/lib/python3.6/importlib/__init__.py:126: in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\ntests_async/transport/test_aiohttp_requests.py:16: in <module>\r\n from aioresponses import aioresponses, core\r\n.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/__init__.py:2: in <module>\r\n from .core import CallbackResult, aioresponses\r\n.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/core.py:23: in <module>\r\n from .compat import (\r\n.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/compat.py:29: in <module>\r\n from asynctest import fail_on, skipIf\r\nE ModuleNotFoundError: No module named 'asynctest'\r\n```\r\n\r\nDue to https://github.com/pnuckowski/aioresponses/issues/172\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport nox\n\nTEST_DEPENDENCIES = [\n \"flask\",\n \"freezegun\",\n \"mock\",\n \"oauth2client\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-localserver\",\n \"requests\",\n \"urllib3\",\n \"cryptography\",\n \"responses\",\n \"grpcio\",\n]\n\nASYNC_DEPENDENCIES = [\"pytest-asyncio\", \"aioresponses\"]\n\nBLACK_VERSION = \"black==19.3b0\"\nBLACK_PATHS = [\n \"google\",\n \"tests\",\n \"tests_async\",\n \"noxfile.py\",\n \"setup.py\",\n \"docs/conf.py\",\n]\n\n\[email protected](python=\"3.7\")\ndef lint(session):\n session.install(\"flake8\", \"flake8-import-order\", \"docutils\", BLACK_VERSION)\n session.install(\".\")\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n session.run(\n \"flake8\",\n \"--import-order-style=google\",\n \"--application-import-names=google,tests,system_tests\",\n \"google\",\n \"tests\",\n \"tests_async\",\n )\n session.run(\n \"python\", \"setup.py\", \"check\", \"--metadata\", \"--restructuredtext\", \"--strict\"\n 
)\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\"])\ndef unit(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*(ASYNC_DEPENDENCIES))\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"tests\",\n \"tests_async\",\n )\n\n\[email protected](python=[\"2.7\", \"3.5\"])\ndef unit_prev_versions(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(\".\")\n session.run(\n \"pytest\", \"--cov=google.auth\", \"--cov=google.oauth2\", \"--cov=tests\", \"tests\"\n )\n\n\[email protected](python=\"3.7\")\ndef cover(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*(ASYNC_DEPENDENCIES))\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"--cov=tests_async\",\n \"--cov-report=\",\n \"tests\",\n \"tests_async\",\n )\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n\n\[email protected](python=\"3.7\")\ndef docgen(session):\n session.env[\"SPHINX_APIDOC_OPTIONS\"] = \"members,inherited-members,show-inheritance\"\n session.install(*TEST_DEPENDENCIES)\n session.install(\"sphinx\")\n session.install(\".\")\n session.run(\"rm\", \"-r\", \"docs/reference\")\n session.run(\n \"sphinx-apidoc\",\n \"--output-dir\",\n \"docs/reference\",\n \"--separate\",\n \"--module-first\",\n \"google\",\n )\n\n\[email protected](python=\"3.7\")\ndef docs(session):\n session.install(\"sphinx\", \"-r\", \"docs/requirements-docs.txt\")\n session.install(\".\")\n session.run(\"make\", \"-C\", \"docs\", \"html\")\n\n\[email protected](python=\"pypy\")\ndef pypy(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"tests\",\n \"tests_async\",\n )\n", "path": "noxfile.py"}]} | 2,277 | 164 |
gh_patches_debug_38536 | rasdani/github-patches | git_diff | chainer__chainer-6175 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`L.Deconvolution2D` lacks `dilate` option
Although `F.deconvolution_2d` already supports a `dilate` argument, `L.Deconvolution2D` does not expose it.
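
A sketch of how the option could be threaded through the link, mirroring how `groups` is already handled in `Deconvolution2D` (excerpt-style illustration, not the final API; it assumes `F.deconvolution_2d` accepts a `dilate` keyword, as stated above):

```python
# In Deconvolution2D.__init__ (sketch): parse an extra ``dilate`` kwarg.
dilate, groups = argument.parse_kwargs(
    kwargs, ('dilate', 1), ('groups', 1),
    deterministic="deterministic argument is not supported anymore. "
    "Use chainer.using_config('cudnn_deterministic', value) "
    "context where value is either `True` or `False`.")
self.dilate = _pair(dilate)

# In Deconvolution2D.forward (sketch): pass it through to the function.
return deconvolution_2d.deconvolution_2d(
    x, self.W, self.b, self.stride, self.pad, self.outsize,
    dilate=self.dilate, groups=self.groups)
```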
</issue>
<code>
[start of chainer/links/connection/deconvolution_2d.py]
1 import numpy
2
3 from chainer.backends import cuda
4 from chainer.functions.connection import deconvolution_2d
5 from chainer import initializers
6 from chainer import link
7 from chainer.utils import argument
8 from chainer import variable
9
10
11 class Deconvolution2D(link.Link):
12
13 """__init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0, nobias=False, outsize=None, initialW=None, initial_bias=None, *, groups=1)
14
15 Two dimensional deconvolution function.
16
17 This link wraps the :func:`~chainer.functions.deconvolution_2d` function
18 and holds the filter weight and bias vector as parameters.
19
20 Deconvolution links can use a feature of cuDNN called autotuning, which
21 selects the most efficient CNN algorithm for images of fixed-size,
22 can provide a significant performance boost for fixed neural nets.
23 To enable, set `chainer.using_config('autotune', True)`
24
25 Args:
26 in_channels (int or None): Number of channels of input arrays.
27 If ``None``, parameter initialization will be deferred until the
28 first forward data pass at which time the size will be determined.
29 out_channels (int): Number of channels of output arrays.
30 ksize (int or pair of ints): Size of filters (a.k.a. kernels).
31 ``ksize=k`` and ``ksize=(k, k)`` are equivalent.
32 stride (int or pair of ints): Stride of filter applications.
33 ``stride=s`` and ``stride=(s, s)`` are equivalent.
34 pad (int or pair of ints): Spatial padding width for input arrays.
35 ``pad=p`` and ``pad=(p, p)`` are equivalent.
36 nobias (bool): If ``True``, then this function does not use the bias
37 term.
38 outsize (tuple): Expected output size of deconvolutional operation.
39 It should be pair of height and width :math:`(out_H, out_W)`.
40 Default value is ``None`` and the outsize is estimated by
41 input size, stride and pad.
42 initialW (:ref:`initializer <initializer>`): Initializer to
43 initialize the weight. When it is :class:`numpy.ndarray`,
44 its ``ndim`` should be 4.
45 initial_bias (:ref:`initializer <initializer>`): Initializer to
46 initialize the bias. If ``None``, the bias will be initialized to
47 zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
48 groups (int): The number of groups to use grouped deconvolution. The
49 default is one, where grouped deconvolution is not used.
50
51 The filter weight has four dimensions :math:`(c_I, c_O, k_H, k_W)`
52 which indicate the number of input channels, output channels,
53 height and width of the kernels, respectively.
54 The filter weight is initialized with i.i.d. Gaussian random samples, each
55 of which has zero mean and deviation :math:`\\sqrt{1/(c_I k_H k_W)}` by
56 default.
57
58 The bias vector is of size :math:`c_O`.
59 Its elements are initialized by ``bias`` argument.
60 If ``nobias`` argument is set to True, then this function does not hold
61 the bias parameter.
62
63 The output of this function can be non-deterministic when it uses cuDNN.
64 If ``chainer.configuration.config.cudnn_deterministic`` is ``True`` and
65 cuDNN version is >= v3, it forces cuDNN to use a deterministic algorithm.
66
67 .. seealso::
68 See :func:`chainer.functions.deconvolution_2d` for the definition of
69 two-dimensional convolution.
70
71 .. seealso::
72 See :func:`chainer.links.Convolution2D` for the examples of ways to
73 give arguments to this link.
74
75 .. admonition:: Example
76
77 There are several ways to make a Deconvolution2D link.
78
79 Let an input vector ``x`` be:
80
81 >>> x = np.arange(1 * 3 * 10 * 10, dtype=np.float32).reshape(1, 3, 10, 10)
82
83 1. Give the first three arguments explicitly:
84
85 In this case, all the other arguments are set to the default
86 values.
87
88 >>> l = L.Deconvolution2D(3, 7, 4)
89 >>> y = l(x)
90 >>> y.shape
91 (1, 7, 13, 13)
92
93 2. Omit ``in_channels`` or fill it with ``None``:
94
95 The below two cases are the same.
96
97 >>> l = L.Deconvolution2D(7, 4)
98 >>> y = l(x)
99 >>> y.shape
100 (1, 7, 13, 13)
101
102 >>> l = L.Deconvolution2D(None, 7, 4)
103 >>> y = l(x)
104 >>> y.shape
105 (1, 7, 13, 13)
106
107 When you omit the first argument, you need to specify the other
108 subsequent arguments from ``stride`` as keyword arguments. So the
109 below two cases are the same.
110
111 >>> l = L.Deconvolution2D(None, 7, 4, 2, 1)
112 >>> y = l(x)
113 >>> y.shape
114 (1, 7, 20, 20)
115
116 >>> l = L.Deconvolution2D(7, 4, stride=2, pad=1)
117 >>> y = l(x)
118 >>> y.shape
119 (1, 7, 20, 20)
120
121 """ # NOQA
122
123 def __init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0,
124 nobias=False, outsize=None, initialW=None, initial_bias=None,
125 **kwargs):
126 super(Deconvolution2D, self).__init__()
127
128 groups, = argument.parse_kwargs(
129 kwargs, ('groups', 1),
130 deterministic="deterministic argument is not supported anymore. "
131 "Use chainer.using_config('cudnn_deterministic', value) "
132 "context where value is either `True` or `False`.")
133
134 if ksize is None:
135 out_channels, ksize, in_channels = in_channels, out_channels, None
136
137 self.ksize = ksize
138 self.stride = _pair(stride)
139 self.pad = _pair(pad)
140 self.outsize = (None, None) if outsize is None else outsize
141 self.out_channels = out_channels
142 self.groups = int(groups)
143
144 with self.init_scope():
145 W_initializer = initializers._get_initializer(initialW)
146 self.W = variable.Parameter(W_initializer)
147 if in_channels is not None:
148 self._initialize_params(in_channels)
149
150 if nobias:
151 self.b = None
152 else:
153 if isinstance(initial_bias, (numpy.ndarray, cuda.ndarray)):
154 assert initial_bias.shape == (out_channels,)
155 if initial_bias is None:
156 initial_bias = 0
157 bias_initializer = initializers._get_initializer(initial_bias)
158 self.b = variable.Parameter(bias_initializer, out_channels)
159
160 def _initialize_params(self, in_channels):
161 kh, kw = _pair(self.ksize)
162 if self.out_channels % self.groups != 0:
163 raise ValueError('the number of output channels must be'
164 'divisible by the number of groups')
165 if in_channels % self.groups != 0:
166 raise ValueError('the number of input channels must be'
167 'divisible by the number of groups')
168 W_shape = (in_channels, int(self.out_channels / self.groups), kh, kw)
169 self.W.initialize(W_shape)
170
171 def forward(self, x):
172 if self.W.array is None:
173 self._initialize_params(x.shape[1])
174 return deconvolution_2d.deconvolution_2d(
175 x, self.W, self.b, self.stride, self.pad, self.outsize,
176 groups=self.groups)
177
178
179 def _pair(x):
180 if hasattr(x, '__getitem__'):
181 return x
182 return x, x
183
[end of chainer/links/connection/deconvolution_2d.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/links/connection/deconvolution_2d.py b/chainer/links/connection/deconvolution_2d.py
--- a/chainer/links/connection/deconvolution_2d.py
+++ b/chainer/links/connection/deconvolution_2d.py
@@ -10,7 +10,7 @@
class Deconvolution2D(link.Link):
- """__init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0, nobias=False, outsize=None, initialW=None, initial_bias=None, *, groups=1)
+ """__init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0, nobias=False, outsize=None, initialW=None, initial_bias=None, *, dilate=1, groups=1)
Two dimensional deconvolution function.
@@ -45,6 +45,9 @@
initial_bias (:ref:`initializer <initializer>`): Initializer to
initialize the bias. If ``None``, the bias will be initialized to
zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
+ dilate (:class:`int` or :class:`tuple` of :class:`int` s):
+ Dilation factor of filter applications.
+ ``dilate=d`` and ``dilate=(d, d)`` are equivalent.
groups (int): The number of groups to use grouped deconvolution. The
default is one, where grouped deconvolution is not used.
@@ -125,8 +128,8 @@
**kwargs):
super(Deconvolution2D, self).__init__()
- groups, = argument.parse_kwargs(
- kwargs, ('groups', 1),
+ dilate, groups, = argument.parse_kwargs(
+ kwargs, ('dilate', 1), ('groups', 1),
deterministic="deterministic argument is not supported anymore. "
"Use chainer.using_config('cudnn_deterministic', value) "
"context where value is either `True` or `False`.")
@@ -137,6 +140,7 @@
self.ksize = ksize
self.stride = _pair(stride)
self.pad = _pair(pad)
+ self.dilate = _pair(dilate)
self.outsize = (None, None) if outsize is None else outsize
self.out_channels = out_channels
self.groups = int(groups)
@@ -173,7 +177,7 @@
self._initialize_params(x.shape[1])
return deconvolution_2d.deconvolution_2d(
x, self.W, self.b, self.stride, self.pad, self.outsize,
- groups=self.groups)
+ dilate=self.dilate, groups=self.groups)
def _pair(x):
| {"golden_diff": "diff --git a/chainer/links/connection/deconvolution_2d.py b/chainer/links/connection/deconvolution_2d.py\n--- a/chainer/links/connection/deconvolution_2d.py\n+++ b/chainer/links/connection/deconvolution_2d.py\n@@ -10,7 +10,7 @@\n \n class Deconvolution2D(link.Link):\n \n- \"\"\"__init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0, nobias=False, outsize=None, initialW=None, initial_bias=None, *, groups=1)\n+ \"\"\"__init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0, nobias=False, outsize=None, initialW=None, initial_bias=None, *, dilate=1, groups=1)\n \n Two dimensional deconvolution function.\n \n@@ -45,6 +45,9 @@\n initial_bias (:ref:`initializer <initializer>`): Initializer to\n initialize the bias. If ``None``, the bias will be initialized to\n zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.\n+ dilate (:class:`int` or :class:`tuple` of :class:`int` s):\n+ Dilation factor of filter applications.\n+ ``dilate=d`` and ``dilate=(d, d)`` are equivalent.\n groups (int): The number of groups to use grouped deconvolution. The\n default is one, where grouped deconvolution is not used.\n \n@@ -125,8 +128,8 @@\n **kwargs):\n super(Deconvolution2D, self).__init__()\n \n- groups, = argument.parse_kwargs(\n- kwargs, ('groups', 1),\n+ dilate, groups, = argument.parse_kwargs(\n+ kwargs, ('dilate', 1), ('groups', 1),\n deterministic=\"deterministic argument is not supported anymore. \"\n \"Use chainer.using_config('cudnn_deterministic', value) \"\n \"context where value is either `True` or `False`.\")\n@@ -137,6 +140,7 @@\n self.ksize = ksize\n self.stride = _pair(stride)\n self.pad = _pair(pad)\n+ self.dilate = _pair(dilate)\n self.outsize = (None, None) if outsize is None else outsize\n self.out_channels = out_channels\n self.groups = int(groups)\n@@ -173,7 +177,7 @@\n self._initialize_params(x.shape[1])\n return deconvolution_2d.deconvolution_2d(\n x, self.W, self.b, self.stride, self.pad, self.outsize,\n- groups=self.groups)\n+ dilate=self.dilate, groups=self.groups)\n \n \n def _pair(x):\n", "issue": "`L.Deconvolution2D` lacks `dilate` option\nalthough `F.deconvolution_2d` already supports it.\n", "before_files": [{"content": "import numpy\n\nfrom chainer.backends import cuda\nfrom chainer.functions.connection import deconvolution_2d\nfrom chainer import initializers\nfrom chainer import link\nfrom chainer.utils import argument\nfrom chainer import variable\n\n\nclass Deconvolution2D(link.Link):\n\n \"\"\"__init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0, nobias=False, outsize=None, initialW=None, initial_bias=None, *, groups=1)\n\n Two dimensional deconvolution function.\n\n This link wraps the :func:`~chainer.functions.deconvolution_2d` function\n and holds the filter weight and bias vector as parameters.\n\n Deconvolution links can use a feature of cuDNN called autotuning, which\n selects the most efficient CNN algorithm for images of fixed-size, \n can provide a significant performance boost for fixed neural nets.\n To enable, set `chainer.using_config('autotune', True)`\n\n Args:\n in_channels (int or None): Number of channels of input arrays.\n If ``None``, parameter initialization will be deferred until the\n first forward data pass at which time the size will be determined.\n out_channels (int): Number of channels of output arrays.\n ksize (int or pair of ints): Size of filters (a.k.a. 
kernels).\n ``ksize=k`` and ``ksize=(k, k)`` are equivalent.\n stride (int or pair of ints): Stride of filter applications.\n ``stride=s`` and ``stride=(s, s)`` are equivalent.\n pad (int or pair of ints): Spatial padding width for input arrays.\n ``pad=p`` and ``pad=(p, p)`` are equivalent.\n nobias (bool): If ``True``, then this function does not use the bias\n term.\n outsize (tuple): Expected output size of deconvolutional operation.\n It should be pair of height and width :math:`(out_H, out_W)`.\n Default value is ``None`` and the outsize is estimated by\n input size, stride and pad.\n initialW (:ref:`initializer <initializer>`): Initializer to\n initialize the weight. When it is :class:`numpy.ndarray`,\n its ``ndim`` should be 4.\n initial_bias (:ref:`initializer <initializer>`): Initializer to\n initialize the bias. If ``None``, the bias will be initialized to\n zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.\n groups (int): The number of groups to use grouped deconvolution. The\n default is one, where grouped deconvolution is not used.\n\n The filter weight has four dimensions :math:`(c_I, c_O, k_H, k_W)`\n which indicate the number of input channels, output channels,\n height and width of the kernels, respectively.\n The filter weight is initialized with i.i.d. Gaussian random samples, each\n of which has zero mean and deviation :math:`\\\\sqrt{1/(c_I k_H k_W)}` by\n default.\n\n The bias vector is of size :math:`c_O`.\n Its elements are initialized by ``bias`` argument.\n If ``nobias`` argument is set to True, then this function does not hold\n the bias parameter.\n\n The output of this function can be non-deterministic when it uses cuDNN.\n If ``chainer.configuration.config.cudnn_deterministic`` is ``True`` and\n cuDNN version is >= v3, it forces cuDNN to use a deterministic algorithm.\n\n .. seealso::\n See :func:`chainer.functions.deconvolution_2d` for the definition of\n two-dimensional convolution.\n\n .. seealso::\n See :func:`chainer.links.Convolution2D` for the examples of ways to\n give arguments to this link.\n\n .. admonition:: Example\n\n There are several ways to make a Deconvolution2D link.\n\n Let an input vector ``x`` be:\n\n >>> x = np.arange(1 * 3 * 10 * 10, dtype=np.float32).reshape(1, 3, 10, 10)\n\n 1. Give the first three arguments explicitly:\n\n In this case, all the other arguments are set to the default\n values.\n\n >>> l = L.Deconvolution2D(3, 7, 4)\n >>> y = l(x)\n >>> y.shape\n (1, 7, 13, 13)\n\n 2. Omit ``in_channels`` or fill it with ``None``:\n\n The below two cases are the same.\n\n >>> l = L.Deconvolution2D(7, 4)\n >>> y = l(x)\n >>> y.shape\n (1, 7, 13, 13)\n\n >>> l = L.Deconvolution2D(None, 7, 4)\n >>> y = l(x)\n >>> y.shape\n (1, 7, 13, 13)\n\n When you omit the first argument, you need to specify the other\n subsequent arguments from ``stride`` as keyword arguments. So the\n below two cases are the same.\n\n >>> l = L.Deconvolution2D(None, 7, 4, 2, 1)\n >>> y = l(x)\n >>> y.shape\n (1, 7, 20, 20)\n\n >>> l = L.Deconvolution2D(7, 4, stride=2, pad=1)\n >>> y = l(x)\n >>> y.shape\n (1, 7, 20, 20)\n\n \"\"\" # NOQA\n\n def __init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0,\n nobias=False, outsize=None, initialW=None, initial_bias=None,\n **kwargs):\n super(Deconvolution2D, self).__init__()\n\n groups, = argument.parse_kwargs(\n kwargs, ('groups', 1),\n deterministic=\"deterministic argument is not supported anymore. 
\"\n \"Use chainer.using_config('cudnn_deterministic', value) \"\n \"context where value is either `True` or `False`.\")\n\n if ksize is None:\n out_channels, ksize, in_channels = in_channels, out_channels, None\n\n self.ksize = ksize\n self.stride = _pair(stride)\n self.pad = _pair(pad)\n self.outsize = (None, None) if outsize is None else outsize\n self.out_channels = out_channels\n self.groups = int(groups)\n\n with self.init_scope():\n W_initializer = initializers._get_initializer(initialW)\n self.W = variable.Parameter(W_initializer)\n if in_channels is not None:\n self._initialize_params(in_channels)\n\n if nobias:\n self.b = None\n else:\n if isinstance(initial_bias, (numpy.ndarray, cuda.ndarray)):\n assert initial_bias.shape == (out_channels,)\n if initial_bias is None:\n initial_bias = 0\n bias_initializer = initializers._get_initializer(initial_bias)\n self.b = variable.Parameter(bias_initializer, out_channels)\n\n def _initialize_params(self, in_channels):\n kh, kw = _pair(self.ksize)\n if self.out_channels % self.groups != 0:\n raise ValueError('the number of output channels must be'\n 'divisible by the number of groups')\n if in_channels % self.groups != 0:\n raise ValueError('the number of input channels must be'\n 'divisible by the number of groups')\n W_shape = (in_channels, int(self.out_channels / self.groups), kh, kw)\n self.W.initialize(W_shape)\n\n def forward(self, x):\n if self.W.array is None:\n self._initialize_params(x.shape[1])\n return deconvolution_2d.deconvolution_2d(\n x, self.W, self.b, self.stride, self.pad, self.outsize,\n groups=self.groups)\n\n\ndef _pair(x):\n if hasattr(x, '__getitem__'):\n return x\n return x, x\n", "path": "chainer/links/connection/deconvolution_2d.py"}]} | 2,832 | 635 |
gh_patches_debug_61252 | rasdani/github-patches | git_diff | spotify__luigi-880 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix external dynamic deps
Since running tasks communicate with the worker via a queue, every dynamic dependency they yield must be serialized and then deserialized back. This doesn't work if a task has `run = NotImplemented`, because Register contained a specific check for that, for no clear reason.
This PR adds a test case to reproduce the issue and fixes it by removing the check.
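For reference, the failing pattern looks roughly like this (a minimal sketch; the task names and the `data.csv` path are made up):

```python
import luigi


class ExternalInput(luigi.ExternalTask):
    # An external dependency: it only declares an output produced by some
    # other system, so no run() is implemented for it.
    path = luigi.Parameter()

    def output(self):
        return luigi.LocalTarget(self.path)


class Consumer(luigi.Task):
    def run(self):
        # Dynamic dependency: yielded while the task is already running, so it
        # is passed back through the worker queue and must be re-creatable by
        # task name on the other side.
        yield ExternalInput(path='data.csv')
```

If a class like `ExternalInput` is skipped by the register, it can no longer be re-instantiated by name when the yielded dependency comes back through the queue.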
</issue>
<code>
[start of luigi/task_register.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2012-2015 Spotify AB
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 #
17 """
18 Define the centralized register of all :class:`~luigi.task.Task` classes.
19 """
20
21 import abc
22 try:
23 from collections import OrderedDict
24 except ImportError:
25 from ordereddict import OrderedDict
26
27 from luigi import six
28 import logging
29 logger = logging.getLogger('luigi-interface')
30
31
32 class TaskClassException(Exception):
33 pass
34
35
36 class Register(abc.ABCMeta):
37 """
38 The Metaclass of :py:class:`Task`.
39
40 Acts as a global registry of Tasks with the following properties:
41
42 1. Cache instances of objects so that eg. ``X(1, 2, 3)`` always returns the
43 same object.
44 2. Keep track of all subclasses of :py:class:`Task` and expose them.
45 """
46 __instance_cache = {}
47 _default_namespace = None
48 _reg = []
49 AMBIGUOUS_CLASS = object() # Placeholder denoting an error
50 """If this value is returned by :py:meth:`__get_reg` then there is an
51 ambiguous task name (two :py:class:`Task` have the same name). This denotes
52 an error."""
53
54 def __new__(metacls, classname, bases, classdict):
55 """
56 Custom class creation for namespacing.
57
58 Also register all subclasses.
59
60 Set the task namespace to whatever the currently declared namespace is.
61 """
62 if "task_namespace" not in classdict:
63 classdict["task_namespace"] = metacls._default_namespace
64
65 cls = super(Register, metacls).__new__(metacls, classname, bases, classdict)
66 metacls._reg.append(cls)
67
68 return cls
69
70 def __call__(cls, *args, **kwargs):
71 """
72 Custom class instantiation utilizing instance cache.
73
74 If a Task has already been instantiated with the same parameters,
75 the previous instance is returned to reduce number of object instances.
76 """
77 def instantiate():
78 return super(Register, cls).__call__(*args, **kwargs)
79
80 h = cls.__instance_cache
81
82 if h is None: # disabled
83 return instantiate()
84
85 params = cls.get_params()
86 param_values = cls.get_param_values(params, args, kwargs)
87
88 k = (cls, tuple(param_values))
89
90 try:
91 hash(k)
92 except TypeError:
93 logger.debug("Not all parameter values are hashable so instance isn't coming from the cache")
94 return instantiate() # unhashable types in parameters
95
96 if k not in h:
97 h[k] = instantiate()
98
99 return h[k]
100
101 @classmethod
102 def clear_instance_cache(cls):
103 """
104 Clear/Reset the instance cache.
105 """
106 cls.__instance_cache = {}
107
108 @classmethod
109 def disable_instance_cache(cls):
110 """
111 Disables the instance cache.
112 """
113 cls.__instance_cache = None
114
115 @property
116 def task_family(cls):
117 """
118 The task family for the given class.
119
120 If ``cls.task_namespace is None`` then it's the name of the class.
121 Otherwise, ``<task_namespace>.`` is prefixed to the class name.
122 """
123 if cls.task_namespace is None:
124 return cls.__name__
125 else:
126 return "%s.%s" % (cls.task_namespace, cls.__name__)
127
128 @classmethod
129 def __get_reg(cls):
130 """Return all of the registered classes.
131
132 :return: an ``collections.OrderedDict`` of task_family -> class
133 """
134 # We have to do this on-demand in case task names have changed later
135 # We return this in a topologically sorted list of inheritance: this is useful in some cases (#822)
136 reg = OrderedDict()
137 for cls in cls._reg:
138 if cls.run == NotImplemented:
139 continue
140 name = cls.task_family
141
142 if name in reg and reg[name] != cls and \
143 reg[name] != cls.AMBIGUOUS_CLASS and \
144 not issubclass(cls, reg[name]):
145 # Registering two different classes - this means we can't instantiate them by name
146 # The only exception is if one class is a subclass of the other. In that case, we
147 # instantiate the most-derived class (this fixes some issues with decorator wrappers).
148 reg[name] = cls.AMBIGUOUS_CLASS
149 else:
150 reg[name] = cls
151
152 return reg
153
154 @classmethod
155 def task_names(cls):
156 """
157 List of task names as strings
158 """
159 return sorted(cls.__get_reg().keys())
160
161 @classmethod
162 def tasks_str(cls):
163 """
164 Human-readable register contents dump.
165 """
166 return ','.join(cls.task_names())
167
168 @classmethod
169 def get_task_cls(cls, name):
170 """
171 Returns an unambiguous class or raises an exception.
172 """
173 task_cls = cls.__get_reg().get(name)
174 if not task_cls:
175 raise TaskClassException('Task %r not found. Candidates are: %s' % (name, cls.tasks_str()))
176
177 if task_cls == cls.AMBIGUOUS_CLASS:
178 raise TaskClassException('Task %r is ambiguous' % name)
179 return task_cls
180
181 @classmethod
182 def get_all_params(cls):
183 """
184 Compiles and returns all parameters for all :py:class:`Task`.
185
186 :return: a generator of tuples (TODO: we should make this more elegant)
187 """
188 for task_name, task_cls in six.iteritems(cls.__get_reg()):
189 if task_cls == cls.AMBIGUOUS_CLASS:
190 continue
191 for param_name, param_obj in task_cls.get_params():
192 yield task_name, (not task_cls.use_cmdline_section), param_name, param_obj
193
194
195 def load_task(module, task_name, params_str):
196 """
197 Imports task dynamically given a module and a task name.
198 """
199 if module is not None:
200 __import__(module)
201 task_cls = Register.get_task_cls(task_name)
202 return task_cls.from_str_params(params_str)
203
[end of luigi/task_register.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/luigi/task_register.py b/luigi/task_register.py
--- a/luigi/task_register.py
+++ b/luigi/task_register.py
@@ -135,8 +135,6 @@
# We return this in a topologically sorted list of inheritance: this is useful in some cases (#822)
reg = OrderedDict()
for cls in cls._reg:
- if cls.run == NotImplemented:
- continue
name = cls.task_family
if name in reg and reg[name] != cls and \
| {"golden_diff": "diff --git a/luigi/task_register.py b/luigi/task_register.py\n--- a/luigi/task_register.py\n+++ b/luigi/task_register.py\n@@ -135,8 +135,6 @@\n # We return this in a topologically sorted list of inheritance: this is useful in some cases (#822)\n reg = OrderedDict()\n for cls in cls._reg:\n- if cls.run == NotImplemented:\n- continue\n name = cls.task_family\n \n if name in reg and reg[name] != cls and \\\n", "issue": "Fix external dynamic deps\nSince running tasks communicate with worker via a queue, all dynamic dependencies that they yield must be serialized and then deserialized back. This doesn't work if a task has `run = NotImplemented`, since there was a specific check for that in Register for unclear reason.\n\nThis PR adds a test case to reproduce the issue and fixes it by removing the check.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nDefine the centralized register of all :class:`~luigi.task.Task` classes.\n\"\"\"\n\nimport abc\ntry:\n from collections import OrderedDict\nexcept ImportError:\n from ordereddict import OrderedDict\n\nfrom luigi import six\nimport logging\nlogger = logging.getLogger('luigi-interface')\n\n\nclass TaskClassException(Exception):\n pass\n\n\nclass Register(abc.ABCMeta):\n \"\"\"\n The Metaclass of :py:class:`Task`.\n\n Acts as a global registry of Tasks with the following properties:\n\n 1. Cache instances of objects so that eg. ``X(1, 2, 3)`` always returns the\n same object.\n 2. Keep track of all subclasses of :py:class:`Task` and expose them.\n \"\"\"\n __instance_cache = {}\n _default_namespace = None\n _reg = []\n AMBIGUOUS_CLASS = object() # Placeholder denoting an error\n \"\"\"If this value is returned by :py:meth:`__get_reg` then there is an\n ambiguous task name (two :py:class:`Task` have the same name). 
This denotes\n an error.\"\"\"\n\n def __new__(metacls, classname, bases, classdict):\n \"\"\"\n Custom class creation for namespacing.\n\n Also register all subclasses.\n\n Set the task namespace to whatever the currently declared namespace is.\n \"\"\"\n if \"task_namespace\" not in classdict:\n classdict[\"task_namespace\"] = metacls._default_namespace\n\n cls = super(Register, metacls).__new__(metacls, classname, bases, classdict)\n metacls._reg.append(cls)\n\n return cls\n\n def __call__(cls, *args, **kwargs):\n \"\"\"\n Custom class instantiation utilizing instance cache.\n\n If a Task has already been instantiated with the same parameters,\n the previous instance is returned to reduce number of object instances.\n \"\"\"\n def instantiate():\n return super(Register, cls).__call__(*args, **kwargs)\n\n h = cls.__instance_cache\n\n if h is None: # disabled\n return instantiate()\n\n params = cls.get_params()\n param_values = cls.get_param_values(params, args, kwargs)\n\n k = (cls, tuple(param_values))\n\n try:\n hash(k)\n except TypeError:\n logger.debug(\"Not all parameter values are hashable so instance isn't coming from the cache\")\n return instantiate() # unhashable types in parameters\n\n if k not in h:\n h[k] = instantiate()\n\n return h[k]\n\n @classmethod\n def clear_instance_cache(cls):\n \"\"\"\n Clear/Reset the instance cache.\n \"\"\"\n cls.__instance_cache = {}\n\n @classmethod\n def disable_instance_cache(cls):\n \"\"\"\n Disables the instance cache.\n \"\"\"\n cls.__instance_cache = None\n\n @property\n def task_family(cls):\n \"\"\"\n The task family for the given class.\n\n If ``cls.task_namespace is None`` then it's the name of the class.\n Otherwise, ``<task_namespace>.`` is prefixed to the class name.\n \"\"\"\n if cls.task_namespace is None:\n return cls.__name__\n else:\n return \"%s.%s\" % (cls.task_namespace, cls.__name__)\n\n @classmethod\n def __get_reg(cls):\n \"\"\"Return all of the registered classes.\n\n :return: an ``collections.OrderedDict`` of task_family -> class\n \"\"\"\n # We have to do this on-demand in case task names have changed later\n # We return this in a topologically sorted list of inheritance: this is useful in some cases (#822)\n reg = OrderedDict()\n for cls in cls._reg:\n if cls.run == NotImplemented:\n continue\n name = cls.task_family\n\n if name in reg and reg[name] != cls and \\\n reg[name] != cls.AMBIGUOUS_CLASS and \\\n not issubclass(cls, reg[name]):\n # Registering two different classes - this means we can't instantiate them by name\n # The only exception is if one class is a subclass of the other. In that case, we\n # instantiate the most-derived class (this fixes some issues with decorator wrappers).\n reg[name] = cls.AMBIGUOUS_CLASS\n else:\n reg[name] = cls\n\n return reg\n\n @classmethod\n def task_names(cls):\n \"\"\"\n List of task names as strings\n \"\"\"\n return sorted(cls.__get_reg().keys())\n\n @classmethod\n def tasks_str(cls):\n \"\"\"\n Human-readable register contents dump.\n \"\"\"\n return ','.join(cls.task_names())\n\n @classmethod\n def get_task_cls(cls, name):\n \"\"\"\n Returns an unambiguous class or raises an exception.\n \"\"\"\n task_cls = cls.__get_reg().get(name)\n if not task_cls:\n raise TaskClassException('Task %r not found. 
Candidates are: %s' % (name, cls.tasks_str()))\n\n if task_cls == cls.AMBIGUOUS_CLASS:\n raise TaskClassException('Task %r is ambiguous' % name)\n return task_cls\n\n @classmethod\n def get_all_params(cls):\n \"\"\"\n Compiles and returns all parameters for all :py:class:`Task`.\n\n :return: a generator of tuples (TODO: we should make this more elegant)\n \"\"\"\n for task_name, task_cls in six.iteritems(cls.__get_reg()):\n if task_cls == cls.AMBIGUOUS_CLASS:\n continue\n for param_name, param_obj in task_cls.get_params():\n yield task_name, (not task_cls.use_cmdline_section), param_name, param_obj\n\n\ndef load_task(module, task_name, params_str):\n \"\"\"\n Imports task dynamically given a module and a task name.\n \"\"\"\n if module is not None:\n __import__(module)\n task_cls = Register.get_task_cls(task_name)\n return task_cls.from_str_params(params_str)\n", "path": "luigi/task_register.py"}]} | 2,539 | 120 |
gh_patches_debug_25585 | rasdani/github-patches | git_diff | talonhub__community-758 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve phrase history mechanism
instead of this:
https://github.com/knausj85/knausj_talon/blob/3e57e0165257cf07b0e21880d44a91e79cb3ef16/code/history.py#L19-L29
consider something like this:
```py
def on_phrase(j):
global history
words = j.get('text')
if words:
text = ' '.join(words)
history.append(text)
history = history[-setting_command_history_size.get() :]
```
</issue>
<code>
[start of code/history.py]
1 from talon import imgui, Module, speech_system, actions, app
2
3 # We keep command_history_size lines of history, but by default display only
4 # command_history_display of them.
5 mod = Module()
6 setting_command_history_size = mod.setting("command_history_size", int, default=50)
7 setting_command_history_display = mod.setting(
8 "command_history_display", int, default=10
9 )
10
11 hist_more = False
12 history = []
13
14
15 def parse_phrase(word_list):
16 return " ".join(word.split("\\")[0] for word in word_list)
17
18
19 def on_phrase(j):
20 global history
21
22 try:
23 val = parse_phrase(getattr(j["parsed"], "_unmapped", j["phrase"]))
24 except:
25 val = parse_phrase(j["phrase"])
26
27 if val != "":
28 history.append(val)
29 history = history[-setting_command_history_size.get() :]
30
31
32 # todo: dynamic rect?
33 @imgui.open(y=0)
34 def gui(gui: imgui.GUI):
35 global history
36 gui.text("Command History")
37 gui.line()
38 text = (
39 history[:] if hist_more else history[-setting_command_history_display.get() :]
40 )
41 for line in text:
42 gui.text(line)
43
44 gui.spacer()
45 if gui.button("Command history close"):
46 actions.user.history_disable()
47
48
49 speech_system.register("phrase", on_phrase)
50
51
52 @mod.action_class
53 class Actions:
54 def history_toggle():
55 """Toggles viewing the history"""
56 if gui.showing:
57 gui.hide()
58 else:
59 gui.show()
60
61 def history_enable():
62 """Enables the history"""
63 gui.show()
64
65 def history_disable():
66 """Disables the history"""
67 gui.hide()
68
69 def history_clear():
70 """Clear the history"""
71 global history
72 history = []
73
74 def history_more():
75 """Show more history"""
76 global hist_more
77 hist_more = True
78
79 def history_less():
80 """Show less history"""
81 global hist_more
82 hist_more = False
83
84 def history_get(number: int):
85 """returns the history entry at the specified index"""
86 num = (0 - number) - 1
87 return history[num]
88
[end of code/history.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/code/history.py b/code/history.py
--- a/code/history.py
+++ b/code/history.py
@@ -1,3 +1,4 @@
+from typing import Optional
from talon import imgui, Module, speech_system, actions, app
# We keep command_history_size lines of history, but by default display only
@@ -12,20 +13,15 @@
history = []
-def parse_phrase(word_list):
- return " ".join(word.split("\\")[0] for word in word_list)
-
-
def on_phrase(j):
global history
- try:
- val = parse_phrase(getattr(j["parsed"], "_unmapped", j["phrase"]))
- except:
- val = parse_phrase(j["phrase"])
+ words = j.get('text')
+
+ text = actions.user.history_transform_phrase_text(words)
- if val != "":
- history.append(val)
+ if text is not None:
+ history.append(text)
history = history[-setting_command_history_size.get() :]
@@ -85,3 +81,11 @@
"""returns the history entry at the specified index"""
num = (0 - number) - 1
return history[num]
+
+ def history_transform_phrase_text(words: list[str]) -> Optional[str]:
+ """Transforms phrase text for presentation in history. Return `None` to omit from history"""
+
+ if not actions.speech.enabled():
+ return None
+
+ return ' '.join(words) if words else None
\ No newline at end of file
| {"golden_diff": "diff --git a/code/history.py b/code/history.py\n--- a/code/history.py\n+++ b/code/history.py\n@@ -1,3 +1,4 @@\n+from typing import Optional\n from talon import imgui, Module, speech_system, actions, app\n \n # We keep command_history_size lines of history, but by default display only\n@@ -12,20 +13,15 @@\n history = []\n \n \n-def parse_phrase(word_list):\n- return \" \".join(word.split(\"\\\\\")[0] for word in word_list)\n-\n-\n def on_phrase(j):\n global history\n \n- try:\n- val = parse_phrase(getattr(j[\"parsed\"], \"_unmapped\", j[\"phrase\"]))\n- except:\n- val = parse_phrase(j[\"phrase\"])\n+ words = j.get('text')\n+\n+ text = actions.user.history_transform_phrase_text(words)\n \n- if val != \"\":\n- history.append(val)\n+ if text is not None:\n+ history.append(text)\n history = history[-setting_command_history_size.get() :]\n \n \n@@ -85,3 +81,11 @@\n \"\"\"returns the history entry at the specified index\"\"\"\n num = (0 - number) - 1\n return history[num]\n+\n+ def history_transform_phrase_text(words: list[str]) -> Optional[str]:\n+ \"\"\"Transforms phrase text for presentation in history. Return `None` to omit from history\"\"\"\n+\n+ if not actions.speech.enabled():\n+ return None\n+\n+ return ' '.join(words) if words else None\n\\ No newline at end of file\n", "issue": "Improve phrase history mechanism\ninstead of this:\r\n\r\nhttps://github.com/knausj85/knausj_talon/blob/3e57e0165257cf07b0e21880d44a91e79cb3ef16/code/history.py#L19-L29\r\n\r\nconsider something like this:\r\n\r\n```py\r\ndef on_phrase(j):\r\n global history\r\n words = j.get('text')\r\n if words:\r\n text = ' '.join(words)\r\n history.append(text)\r\n history = history[-setting_command_history_size.get() :]\r\n```\n", "before_files": [{"content": "from talon import imgui, Module, speech_system, actions, app\n\n# We keep command_history_size lines of history, but by default display only\n# command_history_display of them.\nmod = Module()\nsetting_command_history_size = mod.setting(\"command_history_size\", int, default=50)\nsetting_command_history_display = mod.setting(\n \"command_history_display\", int, default=10\n)\n\nhist_more = False\nhistory = []\n\n\ndef parse_phrase(word_list):\n return \" \".join(word.split(\"\\\\\")[0] for word in word_list)\n\n\ndef on_phrase(j):\n global history\n\n try:\n val = parse_phrase(getattr(j[\"parsed\"], \"_unmapped\", j[\"phrase\"]))\n except:\n val = parse_phrase(j[\"phrase\"])\n\n if val != \"\":\n history.append(val)\n history = history[-setting_command_history_size.get() :]\n\n\n# todo: dynamic rect?\[email protected](y=0)\ndef gui(gui: imgui.GUI):\n global history\n gui.text(\"Command History\")\n gui.line()\n text = (\n history[:] if hist_more else history[-setting_command_history_display.get() :]\n )\n for line in text:\n gui.text(line)\n\n gui.spacer()\n if gui.button(\"Command history close\"):\n actions.user.history_disable()\n\n\nspeech_system.register(\"phrase\", on_phrase)\n\n\[email protected]_class\nclass Actions:\n def history_toggle():\n \"\"\"Toggles viewing the history\"\"\"\n if gui.showing:\n gui.hide()\n else:\n gui.show()\n\n def history_enable():\n \"\"\"Enables the history\"\"\"\n gui.show()\n\n def history_disable():\n \"\"\"Disables the history\"\"\"\n gui.hide()\n\n def history_clear():\n \"\"\"Clear the history\"\"\"\n global history\n history = []\n\n def history_more():\n \"\"\"Show more history\"\"\"\n global hist_more\n hist_more = True\n\n def history_less():\n \"\"\"Show less history\"\"\"\n global hist_more\n 
hist_more = False\n\n def history_get(number: int):\n \"\"\"returns the history entry at the specified index\"\"\"\n num = (0 - number) - 1\n return history[num]\n", "path": "code/history.py"}]} | 1,300 | 341 |
gh_patches_debug_16299 | rasdani/github-patches | git_diff | sopel-irc__sopel-1347 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
search: Unicode URLs get mojibake'd under Python 2
Behold, the curse of bad Unicode handling:
````
23:33:27 <~dgw> ;g grandorder.wiki chulainn alter
23:33:28 <&Kaede> dgw: https://grandorder.wiki/Cú_Chulainn_(Alter)
````
Versus the glory of Python 3:
````
01:00:10 <~dgw> .g grandorder.wiki chulainn alter
01:00:11 <Sopel> dgw: https://grandorder.wiki/Cú_Chulainn_(Alter)
````
Thanks to @prahulkgp for triggering this error, so I could find it. I've already added a test case using this exact query locally, both to verify the fix and to see that it doesn't break later. (Here's hoping it doesn't start bugging out Travis builds like the existing Bing tests sometimes do.)
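The mojibake appears to come from calling `unquote()` on an already-decoded (unicode) string under Python 2. A rough interactive sketch (the `C%C3%BA` escape is just the UTF-8 percent-encoding of `Cú`):

```python
# Python 2.7, illustrative session
>>> from urllib import unquote
>>> unquote(u'C%C3%BA')
u'C\xc3\xba'   # the two UTF-8 bytes become two separate code points
>>> unquote(u'C%C3%BA'.encode('utf-8')).decode('utf-8')
u'C\xfa'       # u'Cú', as intended
```

Round-tripping through UTF-8 bytes before and after unquoting avoids the problem; Python 3's `urllib.parse.unquote` already defaults to UTF-8, which is why it behaves correctly.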
</issue>
<code>
[start of sopel/modules/search.py]
1 # coding=utf-8
2 # Copyright 2008-9, Sean B. Palmer, inamidst.com
3 # Copyright 2012, Elsie Powell, embolalia.com
4 # Licensed under the Eiffel Forum License 2.
5 from __future__ import unicode_literals, absolute_import, print_function, division
6
7 import re
8 from sopel import web
9 from sopel.module import commands, example
10 import requests
11 import xmltodict
12 import sys
13
14 if sys.version_info.major < 3:
15 from urllib import quote_plus, unquote
16 else:
17 from urllib.parse import quote_plus, unquote
18
19
20 def formatnumber(n):
21 """Format a number with beautiful commas."""
22 parts = list(str(n))
23 for i in range((len(parts) - 3), 0, -3):
24 parts.insert(i, ',')
25 return ''.join(parts)
26
27
28 r_bing = re.compile(r'<h2(?: class=" b_topTitle")?><a href="([^"]+)"')
29
30
31 def bing_search(query, lang='en-US'):
32 base = 'https://www.bing.com/search?mkt=%s&q=' % lang
33 bytes = requests.get(base + query).text
34 m = r_bing.search(bytes)
35 if m:
36 return m.group(1)
37
38
39 r_duck = re.compile(r'nofollow" class="[^"]+" href="(?!(?:https?:\/\/r\.search\.yahoo)|(?:https?:\/\/duckduckgo\.com\/y\.js)(?:\/l\/\?kh=-1&uddg=))(.*?)">')
40
41
42 def duck_search(query):
43 query = query.replace('!', '')
44 uri = 'https://duckduckgo.com/html/?q=%s&kl=us-en' % query
45 bytes = requests.get(uri, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}).text
46     if 'web-result' in bytes: # filter out the ads on top of the page
47 bytes = bytes.split('web-result')[1]
48 m = r_duck.search(bytes)
49 if m:
50 unquoted_m = unquote(m.group(1))
51 return web.decode(unquoted_m)
52
53
54 # Alias google_search to duck_search
55 google_search = duck_search
56
57
58 def duck_api(query):
59 if '!bang' in query.lower():
60 return 'https://duckduckgo.com/bang.html'
61
62 # This fixes issue #885 (https://github.com/sopel-irc/sopel/issues/885)
63 # It seems that duckduckgo api redirects to its Instant answer API html page
64     # if the query contains special characters that aren't urlencoded.
65 # So in order to always get a JSON response back the query is urlencoded
66 query = quote_plus(query)
67 uri = 'https://api.duckduckgo.com/?q=%s&format=json&no_html=1&no_redirect=1' % query
68 try:
69 results = requests.get(uri).json()
70 except ValueError:
71 return None
72 if results['Redirect']:
73 return results['Redirect']
74 else:
75 return None
76
77
78 @commands('duck', 'ddg', 'g')
79 @example('.duck sopel irc bot', r'https?:\/\/sopel\.chat\/?', re=True)
80 def duck(bot, trigger):
81 """Queries Duck Duck Go for the specified input."""
82 query = trigger.group(2)
83 if not query:
84 return bot.reply('.ddg what?')
85
86 # If the API gives us something, say it and stop
87 result = duck_api(query)
88 if result:
89 bot.reply(result)
90 return
91
92 # Otherwise, look it up on the HTMl version
93 uri = duck_search(query)
94
95 if uri:
96 bot.reply(uri)
97 if 'last_seen_url' in bot.memory:
98 bot.memory['last_seen_url'][trigger.sender] = uri
99 else:
100 bot.reply("No results found for '%s'." % query)
101
102
103 @commands('bing')
104 @example('.bing sopel irc bot', r'https?:\/\/sopel\.chat\/?', re=True)
105 def bing(bot, trigger):
106 """Queries Bing for the specified input."""
107 if not trigger.group(2):
108 return bot.reply('.bing what?')
109 query = trigger.group(2)
110 result = bing_search(query)
111 if result:
112 bot.say(result)
113 else:
114 bot.reply("No results found for '%s'." % query)
115
116
117 @commands('search')
118 @example('.search sopel irc bot', r'(https?:\/\/sopel\.chat\/? \(b, d\)|https?:\/\/sopel\.chat\/? \(b\), https?:\/\/sopel\.chat\/? \(d\))', re=True)
119 def search(bot, trigger):
120 """Searches Bing and Duck Duck Go."""
121 if not trigger.group(2):
122 return bot.reply('.search for what?')
123 query = trigger.group(2)
124 bu = bing_search(query) or '-'
125 du = duck_search(query) or '-'
126
127 if bu == du:
128 result = '%s (b, d)' % bu
129 else:
130 if len(bu) > 150:
131 bu = '(extremely long link)'
132 if len(du) > 150:
133 du = '(extremely long link)'
134 result = '%s (b), %s (d)' % (bu, du)
135
136 bot.reply(result)
137
138
139 @commands('suggest')
140 @example('.suggest wikip', 'wikipedia')
141 @example('.suggest ', 'No query term.')
142 @example('.suggest lkashdfiauwgeaef', 'Sorry, no result.')
143 def suggest(bot, trigger):
144 """Suggest terms starting with given input"""
145 if not trigger.group(2):
146 return bot.reply("No query term.")
147 query = trigger.group(2)
148 # Using Google isn't necessarily ideal, but at most they'll be able to build
149 # a composite profile of all users on a given instance, not a profile of any
150 # single user. This can be switched out as soon as someone finds (or builds)
151 # an alternative suggestion API.
152 uri = 'https://suggestqueries.google.com/complete/search?output=toolbar&hl=en&q='
153 answer = xmltodict.parse(requests.get(uri + query.replace('+', '%2B')).text)['toplevel']
154 try:
155 answer = answer['CompleteSuggestion'][0]['suggestion']['@data']
156 except TypeError:
157 answer = None
158 if answer:
159 bot.say(answer)
160 else:
161 bot.reply('Sorry, no result.')
162
[end of sopel/modules/search.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sopel/modules/search.py b/sopel/modules/search.py
--- a/sopel/modules/search.py
+++ b/sopel/modules/search.py
@@ -12,7 +12,8 @@
import sys
if sys.version_info.major < 3:
- from urllib import quote_plus, unquote
+ from urllib import quote_plus, unquote as _unquote
+ unquote = lambda s: _unquote(s.encode('utf-8')).decode('utf-8')
else:
from urllib.parse import quote_plus, unquote
@@ -76,6 +77,9 @@
@commands('duck', 'ddg', 'g')
+# test for bad Unicode handling in py2
+@example('.duck grandorder.wiki chulainn alter', 'https://grandorder.wiki/Cú_Chulainn_(Alter)')
+# the last example is what .help displays
@example('.duck sopel irc bot', r'https?:\/\/sopel\.chat\/?', re=True)
def duck(bot, trigger):
"""Queries Duck Duck Go for the specified input."""
| {"golden_diff": "diff --git a/sopel/modules/search.py b/sopel/modules/search.py\n--- a/sopel/modules/search.py\n+++ b/sopel/modules/search.py\n@@ -12,7 +12,8 @@\n import sys\n \n if sys.version_info.major < 3:\n- from urllib import quote_plus, unquote\n+ from urllib import quote_plus, unquote as _unquote\n+ unquote = lambda s: _unquote(s.encode('utf-8')).decode('utf-8')\n else:\n from urllib.parse import quote_plus, unquote\n \n@@ -76,6 +77,9 @@\n \n \n @commands('duck', 'ddg', 'g')\n+# test for bad Unicode handling in py2\n+@example('.duck grandorder.wiki chulainn alter', 'https://grandorder.wiki/C\u00fa_Chulainn_(Alter)')\n+# the last example is what .help displays\n @example('.duck sopel irc bot', r'https?:\\/\\/sopel\\.chat\\/?', re=True)\n def duck(bot, trigger):\n \"\"\"Queries Duck Duck Go for the specified input.\"\"\"\n", "issue": "search: Unicode URLs get mojibake'd under Python 2\nBehold, the curse of bad Unicode handling:\r\n\r\n````\r\n23:33:27 <~dgw> ;g grandorder.wiki chulainn alter\r\n23:33:28 <&Kaede> dgw: https://grandorder.wiki/C\u00c3\u00ba_Chulainn_(Alter)\r\n````\r\n\r\nVersus the glory of Python 3:\r\n\r\n````\r\n01:00:10 <~dgw> .g grandorder.wiki chulainn alter\r\n01:00:11 <Sopel> dgw: https://grandorder.wiki/C\u00fa_Chulainn_(Alter)\r\n````\r\n\r\nThanks to @prahulkgp for triggering this error, so I could find it. I've already added a test case using this exact query locally, both to verify the fix and to see that it doesn't break later. (Here's hoping it doesn't start bugging out Travis builds like the existing Bing tests sometimes do.)\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2008-9, Sean B. Palmer, inamidst.com\n# Copyright 2012, Elsie Powell, embolalia.com\n# Licensed under the Eiffel Forum License 2.\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\nfrom sopel import web\nfrom sopel.module import commands, example\nimport requests\nimport xmltodict\nimport sys\n\nif sys.version_info.major < 3:\n from urllib import quote_plus, unquote\nelse:\n from urllib.parse import quote_plus, unquote\n\n\ndef formatnumber(n):\n \"\"\"Format a number with beautiful commas.\"\"\"\n parts = list(str(n))\n for i in range((len(parts) - 3), 0, -3):\n parts.insert(i, ',')\n return ''.join(parts)\n\n\nr_bing = re.compile(r'<h2(?: class=\" b_topTitle\")?><a href=\"([^\"]+)\"')\n\n\ndef bing_search(query, lang='en-US'):\n base = 'https://www.bing.com/search?mkt=%s&q=' % lang\n bytes = requests.get(base + query).text\n m = r_bing.search(bytes)\n if m:\n return m.group(1)\n\n\nr_duck = re.compile(r'nofollow\" class=\"[^\"]+\" href=\"(?!(?:https?:\\/\\/r\\.search\\.yahoo)|(?:https?:\\/\\/duckduckgo\\.com\\/y\\.js)(?:\\/l\\/\\?kh=-1&uddg=))(.*?)\">')\n\n\ndef duck_search(query):\n query = query.replace('!', '')\n uri = 'https://duckduckgo.com/html/?q=%s&kl=us-en' % query\n bytes = requests.get(uri, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}).text\n if 'web-result' in bytes: # filter out the adds on top of the page\n bytes = bytes.split('web-result')[1]\n m = r_duck.search(bytes)\n if m:\n unquoted_m = unquote(m.group(1))\n return web.decode(unquoted_m)\n\n\n# Alias google_search to duck_search\ngoogle_search = duck_search\n\n\ndef duck_api(query):\n if '!bang' in query.lower():\n return 'https://duckduckgo.com/bang.html'\n\n # This fixes issue #885 (https://github.com/sopel-irc/sopel/issues/885)\n # It seems that 
duckduckgo api redirects to its Instant answer API html page\n # if the query constains special charactares that aren't urlencoded.\n # So in order to always get a JSON response back the query is urlencoded\n query = quote_plus(query)\n uri = 'https://api.duckduckgo.com/?q=%s&format=json&no_html=1&no_redirect=1' % query\n try:\n results = requests.get(uri).json()\n except ValueError:\n return None\n if results['Redirect']:\n return results['Redirect']\n else:\n return None\n\n\n@commands('duck', 'ddg', 'g')\n@example('.duck sopel irc bot', r'https?:\\/\\/sopel\\.chat\\/?', re=True)\ndef duck(bot, trigger):\n \"\"\"Queries Duck Duck Go for the specified input.\"\"\"\n query = trigger.group(2)\n if not query:\n return bot.reply('.ddg what?')\n\n # If the API gives us something, say it and stop\n result = duck_api(query)\n if result:\n bot.reply(result)\n return\n\n # Otherwise, look it up on the HTMl version\n uri = duck_search(query)\n\n if uri:\n bot.reply(uri)\n if 'last_seen_url' in bot.memory:\n bot.memory['last_seen_url'][trigger.sender] = uri\n else:\n bot.reply(\"No results found for '%s'.\" % query)\n\n\n@commands('bing')\n@example('.bing sopel irc bot', r'https?:\\/\\/sopel\\.chat\\/?', re=True)\ndef bing(bot, trigger):\n \"\"\"Queries Bing for the specified input.\"\"\"\n if not trigger.group(2):\n return bot.reply('.bing what?')\n query = trigger.group(2)\n result = bing_search(query)\n if result:\n bot.say(result)\n else:\n bot.reply(\"No results found for '%s'.\" % query)\n\n\n@commands('search')\n@example('.search sopel irc bot', r'(https?:\\/\\/sopel\\.chat\\/? \\(b, d\\)|https?:\\/\\/sopel\\.chat\\/? \\(b\\), https?:\\/\\/sopel\\.chat\\/? \\(d\\))', re=True)\ndef search(bot, trigger):\n \"\"\"Searches Bing and Duck Duck Go.\"\"\"\n if not trigger.group(2):\n return bot.reply('.search for what?')\n query = trigger.group(2)\n bu = bing_search(query) or '-'\n du = duck_search(query) or '-'\n\n if bu == du:\n result = '%s (b, d)' % bu\n else:\n if len(bu) > 150:\n bu = '(extremely long link)'\n if len(du) > 150:\n du = '(extremely long link)'\n result = '%s (b), %s (d)' % (bu, du)\n\n bot.reply(result)\n\n\n@commands('suggest')\n@example('.suggest wikip', 'wikipedia')\n@example('.suggest ', 'No query term.')\n@example('.suggest lkashdfiauwgeaef', 'Sorry, no result.')\ndef suggest(bot, trigger):\n \"\"\"Suggest terms starting with given input\"\"\"\n if not trigger.group(2):\n return bot.reply(\"No query term.\")\n query = trigger.group(2)\n # Using Google isn't necessarily ideal, but at most they'll be able to build\n # a composite profile of all users on a given instance, not a profile of any\n # single user. This can be switched out as soon as someone finds (or builds)\n # an alternative suggestion API.\n uri = 'https://suggestqueries.google.com/complete/search?output=toolbar&hl=en&q='\n answer = xmltodict.parse(requests.get(uri + query.replace('+', '%2B')).text)['toplevel']\n try:\n answer = answer['CompleteSuggestion'][0]['suggestion']['@data']\n except TypeError:\n answer = None\n if answer:\n bot.say(answer)\n else:\n bot.reply('Sorry, no result.')\n", "path": "sopel/modules/search.py"}]} | 2,635 | 239 |
gh_patches_debug_24452 | rasdani/github-patches | git_diff | scrapy__scrapy-4532 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fail or warn if from_crawler() returns None
## Summary
Generate a warning or error if from_crawler() for a middleware/extension/etc. returns None
## Motivation
I created a custom extension and connected signals in the from_crawler() classmethod, but neglected to return the new extension instance. Scrapy still reported the extension under "Enabled extensions", but none of the signals worked, since the instance was immediately garbage collected and its signals were silently disconnected.
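The extension looked roughly like this (simplified, with made-up names); the only real mistake is the missing `return`:

```python
from scrapy import signals


class TimingExtension:
    def __init__(self, crawler):
        self.crawler = crawler

    @classmethod
    def from_crawler(cls, crawler):
        ext = cls(crawler)
        crawler.signals.connect(ext.spider_closed, signal=signals.spider_closed)
        # Bug: no "return ext" here, so from_crawler() returns None; the
        # instance is garbage collected and its signal handler goes with it.

    def spider_closed(self, spider, reason):
        spider.logger.info('spider closed (%s)', reason)
```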
This was of course an error on my part, but it would have saved me a lot of debugging if I had gotten a warning that from_crawler() was returning None, or if the extension were removed from the "Enabled extensions" list.
Would it be appropriate for utils.misc.create_instance() to raise an error or generate a warning if it's about to return None? Or should MiddlewareManager treat create_instance() returning None the same as create_instance() raising NotConfigured?
</issue>
<code>
[start of scrapy/utils/misc.py]
1 """Helper functions which don't fit anywhere else"""
2 import ast
3 import inspect
4 import os
5 import re
6 import hashlib
7 import warnings
8 from contextlib import contextmanager
9 from importlib import import_module
10 from pkgutil import iter_modules
11 from textwrap import dedent
12
13 from w3lib.html import replace_entities
14
15 from scrapy.utils.datatypes import LocalWeakReferencedCache
16 from scrapy.utils.python import flatten, to_unicode
17 from scrapy.item import BaseItem
18
19
20 _ITERABLE_SINGLE_VALUES = dict, BaseItem, str, bytes
21
22
23 def arg_to_iter(arg):
24 """Convert an argument to an iterable. The argument can be a None, single
25 value, or an iterable.
26
27 Exception: if arg is a dict, [arg] will be returned
28 """
29 if arg is None:
30 return []
31 elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):
32 return arg
33 else:
34 return [arg]
35
36
37 def load_object(path):
38 """Load an object given its absolute object path, and return it.
39
40 object can be the import path of a class, function, variable or an
41 instance, e.g. 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'
42 """
43
44 try:
45 dot = path.rindex('.')
46 except ValueError:
47 raise ValueError("Error loading object '%s': not a full path" % path)
48
49 module, name = path[:dot], path[dot + 1:]
50 mod = import_module(module)
51
52 try:
53 obj = getattr(mod, name)
54 except AttributeError:
55 raise NameError("Module '%s' doesn't define any object named '%s'" % (module, name))
56
57 return obj
58
59
60 def walk_modules(path):
61 """Loads a module and all its submodules from the given module path and
62 returns them. If *any* module throws an exception while importing, that
63 exception is thrown back.
64
65 For example: walk_modules('scrapy.utils')
66 """
67
68 mods = []
69 mod = import_module(path)
70 mods.append(mod)
71 if hasattr(mod, '__path__'):
72 for _, subpath, ispkg in iter_modules(mod.__path__):
73 fullpath = path + '.' + subpath
74 if ispkg:
75 mods += walk_modules(fullpath)
76 else:
77 submod = import_module(fullpath)
78 mods.append(submod)
79 return mods
80
81
82 def extract_regex(regex, text, encoding='utf-8'):
83 """Extract a list of unicode strings from the given text/encoding using the following policies:
84
85 * if the regex contains a named group called "extract" that will be returned
86 * if the regex contains multiple numbered groups, all those will be returned (flattened)
87 * if the regex doesn't contain any group the entire regex matching is returned
88 """
89
90 if isinstance(regex, str):
91 regex = re.compile(regex, re.UNICODE)
92
93 try:
94 strings = [regex.search(text).group('extract')] # named group
95 except Exception:
96 strings = regex.findall(text) # full regex or numbered groups
97 strings = flatten(strings)
98
99 if isinstance(text, str):
100 return [replace_entities(s, keep=['lt', 'amp']) for s in strings]
101 else:
102 return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])
103 for s in strings]
104
105
106 def md5sum(file):
107 """Calculate the md5 checksum of a file-like object without reading its
108 whole content in memory.
109
110 >>> from io import BytesIO
111 >>> md5sum(BytesIO(b'file content to hash'))
112 '784406af91dd5a54fbb9c84c2236595a'
113 """
114 m = hashlib.md5()
115 while True:
116 d = file.read(8096)
117 if not d:
118 break
119 m.update(d)
120 return m.hexdigest()
121
122
123 def rel_has_nofollow(rel):
124 """Return True if link rel attribute has nofollow type"""
125 return rel is not None and 'nofollow' in rel.split()
126
127
128 def create_instance(objcls, settings, crawler, *args, **kwargs):
129 """Construct a class instance using its ``from_crawler`` or
130 ``from_settings`` constructors, if available.
131
132 At least one of ``settings`` and ``crawler`` needs to be different from
133 ``None``. If ``settings `` is ``None``, ``crawler.settings`` will be used.
134 If ``crawler`` is ``None``, only the ``from_settings`` constructor will be
135 tried.
136
137 ``*args`` and ``**kwargs`` are forwarded to the constructors.
138
139 Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.
140 """
141 if settings is None:
142 if crawler is None:
143 raise ValueError("Specify at least one of settings and crawler.")
144 settings = crawler.settings
145 if crawler and hasattr(objcls, 'from_crawler'):
146 return objcls.from_crawler(crawler, *args, **kwargs)
147 elif hasattr(objcls, 'from_settings'):
148 return objcls.from_settings(settings, *args, **kwargs)
149 else:
150 return objcls(*args, **kwargs)
151
152
153 @contextmanager
154 def set_environ(**kwargs):
155 """Temporarily set environment variables inside the context manager and
156 fully restore previous environment afterwards
157 """
158
159 original_env = {k: os.environ.get(k) for k in kwargs}
160 os.environ.update(kwargs)
161 try:
162 yield
163 finally:
164 for k, v in original_env.items():
165 if v is None:
166 del os.environ[k]
167 else:
168 os.environ[k] = v
169
170
171 _generator_callbacks_cache = LocalWeakReferencedCache(limit=128)
172
173
174 def is_generator_with_return_value(callable):
175 """
176 Returns True if a callable is a generator function which includes a
177 'return' statement with a value different than None, False otherwise
178 """
179 if callable in _generator_callbacks_cache:
180 return _generator_callbacks_cache[callable]
181
182 def returns_none(return_node):
183 value = return_node.value
184 return value is None or isinstance(value, ast.NameConstant) and value.value is None
185
186 if inspect.isgeneratorfunction(callable):
187 tree = ast.parse(dedent(inspect.getsource(callable)))
188 for node in ast.walk(tree):
189 if isinstance(node, ast.Return) and not returns_none(node):
190 _generator_callbacks_cache[callable] = True
191 return _generator_callbacks_cache[callable]
192
193 _generator_callbacks_cache[callable] = False
194 return _generator_callbacks_cache[callable]
195
196
197 def warn_on_generator_with_return_value(spider, callable):
198 """
199 Logs a warning if a callable is a generator function and includes
200 a 'return' statement with a value different than None
201 """
202 if is_generator_with_return_value(callable):
203 warnings.warn(
204 'The "{}.{}" method is a generator and includes a "return" statement with a '
205 'value different than None. This could lead to unexpected behaviour. Please see '
206 'https://docs.python.org/3/reference/simple_stmts.html#the-return-statement '
207 'for details about the semantics of the "return" statement within generators'
208 .format(spider.__class__.__name__, callable.__name__), stacklevel=2,
209 )
210
[end of scrapy/utils/misc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/utils/misc.py b/scrapy/utils/misc.py
--- a/scrapy/utils/misc.py
+++ b/scrapy/utils/misc.py
@@ -137,17 +137,26 @@
``*args`` and ``**kwargs`` are forwarded to the constructors.
Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.
+
+ Raises ``TypeError`` if the resulting instance is ``None`` (e.g. if an
+ extension has not been implemented correctly).
"""
if settings is None:
if crawler is None:
raise ValueError("Specify at least one of settings and crawler.")
settings = crawler.settings
if crawler and hasattr(objcls, 'from_crawler'):
- return objcls.from_crawler(crawler, *args, **kwargs)
+ instance = objcls.from_crawler(crawler, *args, **kwargs)
+ method_name = 'from_crawler'
elif hasattr(objcls, 'from_settings'):
- return objcls.from_settings(settings, *args, **kwargs)
+ instance = objcls.from_settings(settings, *args, **kwargs)
+ method_name = 'from_settings'
else:
- return objcls(*args, **kwargs)
+ instance = objcls(*args, **kwargs)
+ method_name = '__new__'
+ if instance is None:
+ raise TypeError("%s.%s returned None" % (objcls.__qualname__, method_name))
+ return instance
@contextmanager
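
The diff above makes `create_instance` fail loudly when a constructor hook returns `None`. A minimal, self-contained sketch of that behaviour; `DummyCrawler` and `BadExtension` are hypothetical stand-ins, and the real helper is `scrapy.utils.misc.create_instance`:

```python
# Self-contained sketch of the patched logic; no Scrapy import required.
class DummyCrawler:
    settings = {}

class BadExtension:
    @classmethod
    def from_crawler(cls, crawler):
        ext = cls()
        # ... signals would be connected here ...
        # Bug described in the issue: the instance is never returned,
        # so this classmethod implicitly returns None.

def create_instance(objcls, settings, crawler, *args, **kwargs):
    if settings is None:
        if crawler is None:
            raise ValueError("Specify at least one of settings and crawler.")
        settings = crawler.settings
    if crawler and hasattr(objcls, 'from_crawler'):
        instance = objcls.from_crawler(crawler, *args, **kwargs)
        method_name = 'from_crawler'
    elif hasattr(objcls, 'from_settings'):
        instance = objcls.from_settings(settings, *args, **kwargs)
        method_name = 'from_settings'
    else:
        instance = objcls(*args, **kwargs)
        method_name = '__new__'
    if instance is None:
        raise TypeError("%s.%s returned None" % (objcls.__qualname__, method_name))
    return instance

try:
    create_instance(BadExtension, None, DummyCrawler())
except TypeError as exc:
    print(exc)  # BadExtension.from_crawler returned None
```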
| {"golden_diff": "diff --git a/scrapy/utils/misc.py b/scrapy/utils/misc.py\n--- a/scrapy/utils/misc.py\n+++ b/scrapy/utils/misc.py\n@@ -137,17 +137,26 @@\n ``*args`` and ``**kwargs`` are forwarded to the constructors.\n \n Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.\n+\n+ Raises ``TypeError`` if the resulting instance is ``None`` (e.g. if an\n+ extension has not been implemented correctly).\n \"\"\"\n if settings is None:\n if crawler is None:\n raise ValueError(\"Specify at least one of settings and crawler.\")\n settings = crawler.settings\n if crawler and hasattr(objcls, 'from_crawler'):\n- return objcls.from_crawler(crawler, *args, **kwargs)\n+ instance = objcls.from_crawler(crawler, *args, **kwargs)\n+ method_name = 'from_crawler'\n elif hasattr(objcls, 'from_settings'):\n- return objcls.from_settings(settings, *args, **kwargs)\n+ instance = objcls.from_settings(settings, *args, **kwargs)\n+ method_name = 'from_settings'\n else:\n- return objcls(*args, **kwargs)\n+ instance = objcls(*args, **kwargs)\n+ method_name = '__new__'\n+ if instance is None:\n+ raise TypeError(\"%s.%s returned None\" % (objcls.__qualname__, method_name))\n+ return instance\n \n \n @contextmanager\n", "issue": "Fail or warn if from_crawler() returns None\n## Summary\r\n\r\nGenerate a warning or error if from_crawler() for a middleware/extension/etc. returns None\r\n\r\n## Motivation\r\n\r\nI created a custom extension and connected signals in the from_crawler() classmethod, but neglected to return the new extension instance. Scrapy still reported the extension under \"Enabled extensions\", but none of the signals worked, since the instance was immediately garbage collected and its signals were silently disconnected.\r\n\r\nThis was of course an error on my part, but it would have saved me a lot of debugging if I had gotten a warning that from_crawler() was returning None, or if the extension were removed from the \"Enabled extensions\" list.\r\n\r\nWould it be appropriate for utils.misc.create_instance() to raise an error or generate a warning if it's about to return None? Or should MiddlewareManager treat create_instance() returning None the same as create_instance() raising NotConfigured?\n", "before_files": [{"content": "\"\"\"Helper functions which don't fit anywhere else\"\"\"\nimport ast\nimport inspect\nimport os\nimport re\nimport hashlib\nimport warnings\nfrom contextlib import contextmanager\nfrom importlib import import_module\nfrom pkgutil import iter_modules\nfrom textwrap import dedent\n\nfrom w3lib.html import replace_entities\n\nfrom scrapy.utils.datatypes import LocalWeakReferencedCache\nfrom scrapy.utils.python import flatten, to_unicode\nfrom scrapy.item import BaseItem\n\n\n_ITERABLE_SINGLE_VALUES = dict, BaseItem, str, bytes\n\n\ndef arg_to_iter(arg):\n \"\"\"Convert an argument to an iterable. The argument can be a None, single\n value, or an iterable.\n\n Exception: if arg is a dict, [arg] will be returned\n \"\"\"\n if arg is None:\n return []\n elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):\n return arg\n else:\n return [arg]\n\n\ndef load_object(path):\n \"\"\"Load an object given its absolute object path, and return it.\n\n object can be the import path of a class, function, variable or an\n instance, e.g. 
'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'\n \"\"\"\n\n try:\n dot = path.rindex('.')\n except ValueError:\n raise ValueError(\"Error loading object '%s': not a full path\" % path)\n\n module, name = path[:dot], path[dot + 1:]\n mod = import_module(module)\n\n try:\n obj = getattr(mod, name)\n except AttributeError:\n raise NameError(\"Module '%s' doesn't define any object named '%s'\" % (module, name))\n\n return obj\n\n\ndef walk_modules(path):\n \"\"\"Loads a module and all its submodules from the given module path and\n returns them. If *any* module throws an exception while importing, that\n exception is thrown back.\n\n For example: walk_modules('scrapy.utils')\n \"\"\"\n\n mods = []\n mod = import_module(path)\n mods.append(mod)\n if hasattr(mod, '__path__'):\n for _, subpath, ispkg in iter_modules(mod.__path__):\n fullpath = path + '.' + subpath\n if ispkg:\n mods += walk_modules(fullpath)\n else:\n submod = import_module(fullpath)\n mods.append(submod)\n return mods\n\n\ndef extract_regex(regex, text, encoding='utf-8'):\n \"\"\"Extract a list of unicode strings from the given text/encoding using the following policies:\n\n * if the regex contains a named group called \"extract\" that will be returned\n * if the regex contains multiple numbered groups, all those will be returned (flattened)\n * if the regex doesn't contain any group the entire regex matching is returned\n \"\"\"\n\n if isinstance(regex, str):\n regex = re.compile(regex, re.UNICODE)\n\n try:\n strings = [regex.search(text).group('extract')] # named group\n except Exception:\n strings = regex.findall(text) # full regex or numbered groups\n strings = flatten(strings)\n\n if isinstance(text, str):\n return [replace_entities(s, keep=['lt', 'amp']) for s in strings]\n else:\n return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])\n for s in strings]\n\n\ndef md5sum(file):\n \"\"\"Calculate the md5 checksum of a file-like object without reading its\n whole content in memory.\n\n >>> from io import BytesIO\n >>> md5sum(BytesIO(b'file content to hash'))\n '784406af91dd5a54fbb9c84c2236595a'\n \"\"\"\n m = hashlib.md5()\n while True:\n d = file.read(8096)\n if not d:\n break\n m.update(d)\n return m.hexdigest()\n\n\ndef rel_has_nofollow(rel):\n \"\"\"Return True if link rel attribute has nofollow type\"\"\"\n return rel is not None and 'nofollow' in rel.split()\n\n\ndef create_instance(objcls, settings, crawler, *args, **kwargs):\n \"\"\"Construct a class instance using its ``from_crawler`` or\n ``from_settings`` constructors, if available.\n\n At least one of ``settings`` and ``crawler`` needs to be different from\n ``None``. 
If ``settings `` is ``None``, ``crawler.settings`` will be used.\n If ``crawler`` is ``None``, only the ``from_settings`` constructor will be\n tried.\n\n ``*args`` and ``**kwargs`` are forwarded to the constructors.\n\n Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.\n \"\"\"\n if settings is None:\n if crawler is None:\n raise ValueError(\"Specify at least one of settings and crawler.\")\n settings = crawler.settings\n if crawler and hasattr(objcls, 'from_crawler'):\n return objcls.from_crawler(crawler, *args, **kwargs)\n elif hasattr(objcls, 'from_settings'):\n return objcls.from_settings(settings, *args, **kwargs)\n else:\n return objcls(*args, **kwargs)\n\n\n@contextmanager\ndef set_environ(**kwargs):\n \"\"\"Temporarily set environment variables inside the context manager and\n fully restore previous environment afterwards\n \"\"\"\n\n original_env = {k: os.environ.get(k) for k in kwargs}\n os.environ.update(kwargs)\n try:\n yield\n finally:\n for k, v in original_env.items():\n if v is None:\n del os.environ[k]\n else:\n os.environ[k] = v\n\n\n_generator_callbacks_cache = LocalWeakReferencedCache(limit=128)\n\n\ndef is_generator_with_return_value(callable):\n \"\"\"\n Returns True if a callable is a generator function which includes a\n 'return' statement with a value different than None, False otherwise\n \"\"\"\n if callable in _generator_callbacks_cache:\n return _generator_callbacks_cache[callable]\n\n def returns_none(return_node):\n value = return_node.value\n return value is None or isinstance(value, ast.NameConstant) and value.value is None\n\n if inspect.isgeneratorfunction(callable):\n tree = ast.parse(dedent(inspect.getsource(callable)))\n for node in ast.walk(tree):\n if isinstance(node, ast.Return) and not returns_none(node):\n _generator_callbacks_cache[callable] = True\n return _generator_callbacks_cache[callable]\n\n _generator_callbacks_cache[callable] = False\n return _generator_callbacks_cache[callable]\n\n\ndef warn_on_generator_with_return_value(spider, callable):\n \"\"\"\n Logs a warning if a callable is a generator function and includes\n a 'return' statement with a value different than None\n \"\"\"\n if is_generator_with_return_value(callable):\n warnings.warn(\n 'The \"{}.{}\" method is a generator and includes a \"return\" statement with a '\n 'value different than None. This could lead to unexpected behaviour. Please see '\n 'https://docs.python.org/3/reference/simple_stmts.html#the-return-statement '\n 'for details about the semantics of the \"return\" statement within generators'\n .format(spider.__class__.__name__, callable.__name__), stacklevel=2,\n )\n", "path": "scrapy/utils/misc.py"}]} | 2,839 | 331 |
gh_patches_debug_25169 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-617 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement adding a new empty table
## Problem
<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->
Users may want to create an empty table.
## Proposed solution
<!-- A clear and concise description of your proposed solution or feature. -->
The ["Basic Table Operations" design spec](https://wiki.mathesar.org/design/specs/table-operations) has a solution for this, which we need to implement on the frontend. We will be implementing a different design than the one in the spec, which will need to be improvised.
This issue involves the following portions of the spec:
- _User Experience_:
- User adds a new table
- User edits a new table name
- User adds an empty table
- _Interactions_:
- Sequential Table Names
Please note that we'll need to generate and display a default name for the table that the user can use. Following the logic in #449 is recommended.
## Additional context
<!-- Add any other context or screenshots about the feature request here.-->
- Backend work: #184
- Design issue: #185
- #449 is related.
Logs of conversation on Matrix from @pavish:
> For adding an empty table, instead of showing Empty table option within the tab, I think it might be better to show it on the top.
>
> The Add new table button can be changed to a dropdown with two options. 1. Empty table, 2. Import data.
>
> On clicking empty table, it would just send a create table request and create the table. Then the user can rename it if they want.
>
> This will have clean separation of concerns. Because Import data, involves importing from csv, remote file import and then copy/paste from spreadsheet..
>
> Empty table, would just create a new empty table.
</issue>
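Judging from the `TableViewSet.create` code below, the "Empty table" action reduces to a single POST with the target schema and no data files; a blank `name` makes the backend fall back to `gen_table_name` for a sequential default. A rough sketch of that request (the endpoint URL, schema id, and lack of auth handling are assumptions for illustration only):

```python
# Hypothetical frontend-side request; endpoint and ids are placeholders.
import requests

payload = {
    "schema": 1,   # primary key of the target schema (assumed)
    "name": "",    # blank name -> backend generates a sequential default
    # no "data_files": the view then routes to create_empty_table()
}

response = requests.post(
    "http://localhost:8000/api/tables/",
    json=payload,
    timeout=10,
)
response.raise_for_status()
print(response.json()["name"])  # the generated default table name
```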
<code>
[start of mathesar/api/viewsets/tables.py]
1 from django_filters import rest_framework as filters
2 from psycopg2.errors import CheckViolation, DuplicateTable, InvalidTextRepresentation
3 from rest_framework import status, viewsets
4 from rest_framework.decorators import action
5 from rest_framework.exceptions import ValidationError, APIException
6 from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
7 from rest_framework.response import Response
8 from sqlalchemy.exc import ProgrammingError, DataError, IntegrityError
9
10 from db.types.alteration import UnsupportedTypeException
11 from mathesar.api.filters import TableFilter
12 from mathesar.api.pagination import DefaultLimitOffsetPagination
13 from mathesar.api.serializers.tables import TableSerializer, TablePreviewSerializer
14 from mathesar.models import Table
15 from mathesar.utils.tables import (
16 get_table_column_types, create_table_from_datafile, create_empty_table,
17 gen_table_name
18 )
19
20
21 class TableViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):
22 serializer_class = TableSerializer
23 pagination_class = DefaultLimitOffsetPagination
24 filter_backends = (filters.DjangoFilterBackend,)
25 filterset_class = TableFilter
26
27 def get_queryset(self):
28 return Table.objects.all().order_by('-created_at')
29
30 def create(self, request):
31 serializer = TableSerializer(data=request.data, context={'request': request})
32 serializer.is_valid(raise_exception=True)
33
34 if not serializer.validated_data['name']:
35 name = gen_table_name(
36 serializer.validated_data['schema'],
37 serializer.validated_data['data_files'],
38 )
39 else:
40 name = serializer.validated_data['name']
41
42 try:
43 if serializer.validated_data['data_files']:
44 table = create_table_from_datafile(
45 serializer.validated_data['data_files'],
46 name,
47 serializer.validated_data['schema'],
48 )
49 else:
50 table = create_empty_table(
51 name,
52 serializer.validated_data['schema']
53 )
54 except ProgrammingError as e:
55 if type(e.orig) == DuplicateTable:
56 raise ValidationError(
57 f"Relation {request.data['name']} already exists in schema {request.data['schema']}"
58 )
59 else:
60 raise APIException(e)
61
62 serializer = TableSerializer(table, context={'request': request})
63 return Response(serializer.data, status=status.HTTP_201_CREATED)
64
65 def partial_update(self, request, pk=None):
66 serializer = TableSerializer(
67 data=request.data, context={'request': request}, partial=True
68 )
69 serializer.is_valid(raise_exception=True)
70 table = self.get_object()
71
72 # Save the fields that are stored in the model.
73 present_model_fields = []
74 for model_field in table.MODEL_FIELDS:
75 if model_field in serializer.validated_data:
76 setattr(table, model_field, serializer.validated_data[model_field])
77 present_model_fields.append(model_field)
78 table.save(update_fields=present_model_fields)
79 for key in present_model_fields:
80 del serializer.validated_data[key]
81
82 # Save the fields that are stored in the underlying DB.
83 try:
84 table.update_sa_table(serializer.validated_data)
85 except ValueError as e:
86 raise ValidationError(e)
87
88 # Reload the table to avoid cached properties
89 table = self.get_object()
90 serializer = TableSerializer(table, context={'request': request})
91 return Response(serializer.data)
92
93 def destroy(self, request, pk=None):
94 table = self.get_object()
95 table.delete_sa_table()
96 table.delete()
97 return Response(status=status.HTTP_204_NO_CONTENT)
98
99 @action(methods=['get'], detail=True)
100 def type_suggestions(self, request, pk=None):
101 table = self.get_object()
102 col_types = get_table_column_types(table)
103 return Response(col_types)
104
105 @action(methods=['post'], detail=True)
106 def previews(self, request, pk=None):
107 table = self.get_object()
108 serializer = TablePreviewSerializer(data=request.data)
109 serializer.is_valid(raise_exception=True)
110 columns = serializer.data["columns"]
111
112 column_names = [col["name"] for col in columns]
113 if not len(column_names) == len(set(column_names)):
114 raise ValidationError("Column names must be distinct")
115 if not len(columns) == len(table.sa_columns):
116 raise ValidationError("Incorrect number of columns in request.")
117
118 table_data = TableSerializer(table, context={"request": request}).data
119 try:
120 preview_records = table.get_preview(columns)
121 except (DataError, IntegrityError) as e:
122 if type(e.orig) == InvalidTextRepresentation or type(e.orig) == CheckViolation:
123 raise ValidationError("Invalid type cast requested.")
124 else:
125 raise APIException
126 except UnsupportedTypeException as e:
127 raise ValidationError(e)
128 except Exception as e:
129 raise APIException(e)
130
131 table_data.update(
132 {
133 # There's no way to reflect actual column data without
134 # creating a view, so we just use the submission, assuming
135 # no errors means we changed to the desired names and types
136 "columns": columns,
137 "records": preview_records
138 }
139 )
140
141 return Response(table_data)
142
[end of mathesar/api/viewsets/tables.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mathesar/api/viewsets/tables.py b/mathesar/api/viewsets/tables.py
--- a/mathesar/api/viewsets/tables.py
+++ b/mathesar/api/viewsets/tables.py
@@ -31,26 +31,15 @@
serializer = TableSerializer(data=request.data, context={'request': request})
serializer.is_valid(raise_exception=True)
- if not serializer.validated_data['name']:
- name = gen_table_name(
- serializer.validated_data['schema'],
- serializer.validated_data['data_files'],
- )
- else:
- name = serializer.validated_data['name']
+ schema = serializer.validated_data['schema']
+ data_files = serializer.validated_data.get('data_files')
+ name = serializer.validated_data.get('name') or gen_table_name(schema, data_files)
try:
- if serializer.validated_data['data_files']:
- table = create_table_from_datafile(
- serializer.validated_data['data_files'],
- name,
- serializer.validated_data['schema'],
- )
+ if data_files:
+ table = create_table_from_datafile(data_files, name, schema)
else:
- table = create_empty_table(
- name,
- serializer.validated_data['schema']
- )
+ table = create_empty_table(name, schema)
except ProgrammingError as e:
if type(e.orig) == DuplicateTable:
raise ValidationError(
| {"golden_diff": "diff --git a/mathesar/api/viewsets/tables.py b/mathesar/api/viewsets/tables.py\n--- a/mathesar/api/viewsets/tables.py\n+++ b/mathesar/api/viewsets/tables.py\n@@ -31,26 +31,15 @@\n serializer = TableSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n \n- if not serializer.validated_data['name']:\n- name = gen_table_name(\n- serializer.validated_data['schema'],\n- serializer.validated_data['data_files'],\n- )\n- else:\n- name = serializer.validated_data['name']\n+ schema = serializer.validated_data['schema']\n+ data_files = serializer.validated_data.get('data_files')\n+ name = serializer.validated_data.get('name') or gen_table_name(schema, data_files)\n \n try:\n- if serializer.validated_data['data_files']:\n- table = create_table_from_datafile(\n- serializer.validated_data['data_files'],\n- name,\n- serializer.validated_data['schema'],\n- )\n+ if data_files:\n+ table = create_table_from_datafile(data_files, name, schema)\n else:\n- table = create_empty_table(\n- name,\n- serializer.validated_data['schema']\n- )\n+ table = create_empty_table(name, schema)\n except ProgrammingError as e:\n if type(e.orig) == DuplicateTable:\n raise ValidationError(\n", "issue": "Implement adding a new empty table\n## Problem\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\nUsers may want to create an empty table.\r\n\r\n## Proposed solution\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\nThe [\"Basic Table Operations\" design spec](https://wiki.mathesar.org/design/specs/table-operations) has a solution for this, which we need to implement on the frontend. We will be implementing a different design than the one in the spec, which will need to be improvised.\r\n\r\nThis issue involves the following portions of the spec:\r\n- _User Experience_: \r\n - User adds a new table\r\n - User edits a new table name\r\n - User adds an empty table\r\n- _Interactions_: \r\n - Sequential Table Names\r\n \r\nPlease note that we'll need to generate and display a default name of the table that the user can use. Following the logic in #449 is recommended.\r\n\r\n## Additional context\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\n- Backend work: #184 \r\n- Design issue: #185\r\n- #449 is related.\r\n\r\nLogs of conversation on Matrix from @pavish:\r\n\r\n> For adding an empty table, instead of showing Empty table option within the tab, I think it might be better to show it on the top.\r\n> \r\n> The Add new table button can be changed to a dropdown with two options. 1. Empty table, 2. Import data.\r\n> \r\n> On clicking empty table, it would just send a create table request and create the table. Then the user can rename it if they want.\r\n> \r\n> This will have clean separation of concerns. 
Because Import data, involves importing from csv, remote file import and then copy/paste from spreadsheet..\r\n> \r\n> Empty table, would just create a new empty table.\n", "before_files": [{"content": "from django_filters import rest_framework as filters\nfrom psycopg2.errors import CheckViolation, DuplicateTable, InvalidTextRepresentation\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import ValidationError, APIException\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin\nfrom rest_framework.response import Response\nfrom sqlalchemy.exc import ProgrammingError, DataError, IntegrityError\n\nfrom db.types.alteration import UnsupportedTypeException\nfrom mathesar.api.filters import TableFilter\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.serializers.tables import TableSerializer, TablePreviewSerializer\nfrom mathesar.models import Table\nfrom mathesar.utils.tables import (\n get_table_column_types, create_table_from_datafile, create_empty_table,\n gen_table_name\n)\n\n\nclass TableViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):\n serializer_class = TableSerializer\n pagination_class = DefaultLimitOffsetPagination\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = TableFilter\n\n def get_queryset(self):\n return Table.objects.all().order_by('-created_at')\n\n def create(self, request):\n serializer = TableSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n\n if not serializer.validated_data['name']:\n name = gen_table_name(\n serializer.validated_data['schema'],\n serializer.validated_data['data_files'],\n )\n else:\n name = serializer.validated_data['name']\n\n try:\n if serializer.validated_data['data_files']:\n table = create_table_from_datafile(\n serializer.validated_data['data_files'],\n name,\n serializer.validated_data['schema'],\n )\n else:\n table = create_empty_table(\n name,\n serializer.validated_data['schema']\n )\n except ProgrammingError as e:\n if type(e.orig) == DuplicateTable:\n raise ValidationError(\n f\"Relation {request.data['name']} already exists in schema {request.data['schema']}\"\n )\n else:\n raise APIException(e)\n\n serializer = TableSerializer(table, context={'request': request})\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None):\n serializer = TableSerializer(\n data=request.data, context={'request': request}, partial=True\n )\n serializer.is_valid(raise_exception=True)\n table = self.get_object()\n\n # Save the fields that are stored in the model.\n present_model_fields = []\n for model_field in table.MODEL_FIELDS:\n if model_field in serializer.validated_data:\n setattr(table, model_field, serializer.validated_data[model_field])\n present_model_fields.append(model_field)\n table.save(update_fields=present_model_fields)\n for key in present_model_fields:\n del serializer.validated_data[key]\n\n # Save the fields that are stored in the underlying DB.\n try:\n table.update_sa_table(serializer.validated_data)\n except ValueError as e:\n raise ValidationError(e)\n\n # Reload the table to avoid cached properties\n table = self.get_object()\n serializer = TableSerializer(table, context={'request': request})\n return Response(serializer.data)\n\n def destroy(self, request, pk=None):\n table = self.get_object()\n table.delete_sa_table()\n table.delete()\n return 
Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(methods=['get'], detail=True)\n def type_suggestions(self, request, pk=None):\n table = self.get_object()\n col_types = get_table_column_types(table)\n return Response(col_types)\n\n @action(methods=['post'], detail=True)\n def previews(self, request, pk=None):\n table = self.get_object()\n serializer = TablePreviewSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n columns = serializer.data[\"columns\"]\n\n column_names = [col[\"name\"] for col in columns]\n if not len(column_names) == len(set(column_names)):\n raise ValidationError(\"Column names must be distinct\")\n if not len(columns) == len(table.sa_columns):\n raise ValidationError(\"Incorrect number of columns in request.\")\n\n table_data = TableSerializer(table, context={\"request\": request}).data\n try:\n preview_records = table.get_preview(columns)\n except (DataError, IntegrityError) as e:\n if type(e.orig) == InvalidTextRepresentation or type(e.orig) == CheckViolation:\n raise ValidationError(\"Invalid type cast requested.\")\n else:\n raise APIException\n except UnsupportedTypeException as e:\n raise ValidationError(e)\n except Exception as e:\n raise APIException(e)\n\n table_data.update(\n {\n # There's no way to reflect actual column data without\n # creating a view, so we just use the submission, assuming\n # no errors means we changed to the desired names and types\n \"columns\": columns,\n \"records\": preview_records\n }\n )\n\n return Response(table_data)\n", "path": "mathesar/api/viewsets/tables.py"}]} | 2,318 | 317 |
gh_patches_debug_54590 | rasdani/github-patches | git_diff | zulip__zulip-20491 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove "Send a reply" new user tip
After implementing #19900, there are two places where new users are told how to reply to a message: in the Welcome Bot text and in the "Send a reply" new user tip immediately below.
To simplify and avoid redundancy, we should remove the "Send a reply" new user tip.
<img width="909" alt="Screen_Shot_2021-12-06_at_10_08_14_AM" src="https://user-images.githubusercontent.com/2090066/144938995-080268ce-510d-4b76-b3c1-b691fbb814f4.png">
[CZO thread](https://chat.zulip.org/#narrow/stream/101-design/topic/.22click.20to.20reply.22.20whale)
</issue>
<code>
[start of zerver/lib/hotspots.py]
1 # See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html
2 # for documentation on this subsystem.
3 from typing import Dict, List
4
5 from django.conf import settings
6 from django.utils.functional import Promise
7 from django.utils.translation import gettext_lazy
8
9 from zerver.models import UserHotspot, UserProfile
10
11 INTRO_HOTSPOTS: Dict[str, Dict[str, Promise]] = {
12 "intro_reply": {
13 "title": gettext_lazy("Reply to a message"),
14 "description": gettext_lazy("Click anywhere on a message to reply."),
15 },
16 "intro_streams": {
17 "title": gettext_lazy("Catch up on a stream"),
18 "description": gettext_lazy(
19 "Messages sent to a stream are seen by everyone subscribed "
20 "to that stream. Try clicking on one of the stream links below."
21 ),
22 },
23 "intro_topics": {
24 "title": gettext_lazy("Topics"),
25 "description": gettext_lazy(
26 "Every message has a topic. Topics keep conversations "
27 "easy to follow, and make it easy to reply to conversations that start "
28 "while you are offline."
29 ),
30 },
31 "intro_gear": {
32 "title": gettext_lazy("Settings"),
33 "description": gettext_lazy(
34 "Go to Settings to configure your notifications and display settings."
35 ),
36 },
37 "intro_compose": {
38 "title": gettext_lazy("Compose"),
39 "description": gettext_lazy(
40 "Click here to start a new conversation. Pick a topic "
41 "(2-3 words is best), and give it a go!"
42 ),
43 },
44 }
45
46 # We would most likely implement new hotspots in the future that aren't
47 # a part of the initial tutorial. To that end, classifying them into
48 # categories which are aggregated in ALL_HOTSPOTS, seems like a good start.
49 ALL_HOTSPOTS: Dict[str, Dict[str, Promise]] = {
50 **INTRO_HOTSPOTS,
51 }
52
53
54 def get_next_hotspots(user: UserProfile) -> List[Dict[str, object]]:
55 # For manual testing, it can be convenient to set
56 # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to
57 # make it easy to click on all of the hotspots. Note that
58 # ALWAYS_SEND_ALL_HOTSPOTS has some bugs; see ReadTheDocs (link
59 # above) for details.
60 #
61 # Since this is just for development purposes, it's convenient for us to send
62 # all the hotspots rather than any specific category.
63 if settings.ALWAYS_SEND_ALL_HOTSPOTS:
64 return [
65 {
66 "name": hotspot,
67 "title": str(ALL_HOTSPOTS[hotspot]["title"]),
68 "description": str(ALL_HOTSPOTS[hotspot]["description"]),
69 "delay": 0,
70 }
71 for hotspot in ALL_HOTSPOTS
72 ]
73
74 # If a Zulip server has disabled the tutorial, never send hotspots.
75 if not settings.TUTORIAL_ENABLED:
76 return []
77
78 if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:
79 return []
80
81 seen_hotspots = frozenset(
82 UserHotspot.objects.filter(user=user).values_list("hotspot", flat=True)
83 )
84 for hotspot in INTRO_HOTSPOTS.keys():
85 if hotspot not in seen_hotspots:
86 return [
87 {
88 "name": hotspot,
89 "title": str(INTRO_HOTSPOTS[hotspot]["title"]),
90 "description": str(INTRO_HOTSPOTS[hotspot]["description"]),
91 "delay": 0.5,
92 }
93 ]
94
95 user.tutorial_status = UserProfile.TUTORIAL_FINISHED
96 user.save(update_fields=["tutorial_status"])
97 return []
98
99
100 def copy_hotspots(source_profile: UserProfile, target_profile: UserProfile) -> None:
101 for userhotspot in frozenset(UserHotspot.objects.filter(user=source_profile)):
102 UserHotspot.objects.create(
103 user=target_profile, hotspot=userhotspot.hotspot, timestamp=userhotspot.timestamp
104 )
105
106 target_profile.tutorial_status = source_profile.tutorial_status
107 target_profile.onboarding_steps = source_profile.onboarding_steps
108 target_profile.save(update_fields=["tutorial_status", "onboarding_steps"])
109
[end of zerver/lib/hotspots.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zerver/lib/hotspots.py b/zerver/lib/hotspots.py
--- a/zerver/lib/hotspots.py
+++ b/zerver/lib/hotspots.py
@@ -9,10 +9,6 @@
from zerver.models import UserHotspot, UserProfile
INTRO_HOTSPOTS: Dict[str, Dict[str, Promise]] = {
- "intro_reply": {
- "title": gettext_lazy("Reply to a message"),
- "description": gettext_lazy("Click anywhere on a message to reply."),
- },
"intro_streams": {
"title": gettext_lazy("Catch up on a stream"),
"description": gettext_lazy(
| {"golden_diff": "diff --git a/zerver/lib/hotspots.py b/zerver/lib/hotspots.py\n--- a/zerver/lib/hotspots.py\n+++ b/zerver/lib/hotspots.py\n@@ -9,10 +9,6 @@\n from zerver.models import UserHotspot, UserProfile\n \n INTRO_HOTSPOTS: Dict[str, Dict[str, Promise]] = {\n- \"intro_reply\": {\n- \"title\": gettext_lazy(\"Reply to a message\"),\n- \"description\": gettext_lazy(\"Click anywhere on a message to reply.\"),\n- },\n \"intro_streams\": {\n \"title\": gettext_lazy(\"Catch up on a stream\"),\n \"description\": gettext_lazy(\n", "issue": "Remove \"Send a reply\" new user tip\nAfter implementing #19900, there are two places where new users are told how to reply to a message: in the Welcome Bot text and in the \"Send a reply\" new user tip immediately below.\r\n\r\nTo simplify and avoid redundancy, we should remove the \"Send a reply\" new user tip.\r\n\r\n<img width=\"909\" alt=\"Screen_Shot_2021-12-06_at_10_08_14_AM\" src=\"https://user-images.githubusercontent.com/2090066/144938995-080268ce-510d-4b76-b3c1-b691fbb814f4.png\">\r\n\r\n[CZO thread](https://chat.zulip.org/#narrow/stream/101-design/topic/.22click.20to.20reply.22.20whale)\n", "before_files": [{"content": "# See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html\n# for documentation on this subsystem.\nfrom typing import Dict, List\n\nfrom django.conf import settings\nfrom django.utils.functional import Promise\nfrom django.utils.translation import gettext_lazy\n\nfrom zerver.models import UserHotspot, UserProfile\n\nINTRO_HOTSPOTS: Dict[str, Dict[str, Promise]] = {\n \"intro_reply\": {\n \"title\": gettext_lazy(\"Reply to a message\"),\n \"description\": gettext_lazy(\"Click anywhere on a message to reply.\"),\n },\n \"intro_streams\": {\n \"title\": gettext_lazy(\"Catch up on a stream\"),\n \"description\": gettext_lazy(\n \"Messages sent to a stream are seen by everyone subscribed \"\n \"to that stream. Try clicking on one of the stream links below.\"\n ),\n },\n \"intro_topics\": {\n \"title\": gettext_lazy(\"Topics\"),\n \"description\": gettext_lazy(\n \"Every message has a topic. Topics keep conversations \"\n \"easy to follow, and make it easy to reply to conversations that start \"\n \"while you are offline.\"\n ),\n },\n \"intro_gear\": {\n \"title\": gettext_lazy(\"Settings\"),\n \"description\": gettext_lazy(\n \"Go to Settings to configure your notifications and display settings.\"\n ),\n },\n \"intro_compose\": {\n \"title\": gettext_lazy(\"Compose\"),\n \"description\": gettext_lazy(\n \"Click here to start a new conversation. Pick a topic \"\n \"(2-3 words is best), and give it a go!\"\n ),\n },\n}\n\n# We would most likely implement new hotspots in the future that aren't\n# a part of the initial tutorial. To that end, classifying them into\n# categories which are aggregated in ALL_HOTSPOTS, seems like a good start.\nALL_HOTSPOTS: Dict[str, Dict[str, Promise]] = {\n **INTRO_HOTSPOTS,\n}\n\n\ndef get_next_hotspots(user: UserProfile) -> List[Dict[str, object]]:\n # For manual testing, it can be convenient to set\n # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to\n # make it easy to click on all of the hotspots. 
Note that\n # ALWAYS_SEND_ALL_HOTSPOTS has some bugs; see ReadTheDocs (link\n # above) for details.\n #\n # Since this is just for development purposes, it's convenient for us to send\n # all the hotspots rather than any specific category.\n if settings.ALWAYS_SEND_ALL_HOTSPOTS:\n return [\n {\n \"name\": hotspot,\n \"title\": str(ALL_HOTSPOTS[hotspot][\"title\"]),\n \"description\": str(ALL_HOTSPOTS[hotspot][\"description\"]),\n \"delay\": 0,\n }\n for hotspot in ALL_HOTSPOTS\n ]\n\n # If a Zulip server has disabled the tutorial, never send hotspots.\n if not settings.TUTORIAL_ENABLED:\n return []\n\n if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:\n return []\n\n seen_hotspots = frozenset(\n UserHotspot.objects.filter(user=user).values_list(\"hotspot\", flat=True)\n )\n for hotspot in INTRO_HOTSPOTS.keys():\n if hotspot not in seen_hotspots:\n return [\n {\n \"name\": hotspot,\n \"title\": str(INTRO_HOTSPOTS[hotspot][\"title\"]),\n \"description\": str(INTRO_HOTSPOTS[hotspot][\"description\"]),\n \"delay\": 0.5,\n }\n ]\n\n user.tutorial_status = UserProfile.TUTORIAL_FINISHED\n user.save(update_fields=[\"tutorial_status\"])\n return []\n\n\ndef copy_hotspots(source_profile: UserProfile, target_profile: UserProfile) -> None:\n for userhotspot in frozenset(UserHotspot.objects.filter(user=source_profile)):\n UserHotspot.objects.create(\n user=target_profile, hotspot=userhotspot.hotspot, timestamp=userhotspot.timestamp\n )\n\n target_profile.tutorial_status = source_profile.tutorial_status\n target_profile.onboarding_steps = source_profile.onboarding_steps\n target_profile.save(update_fields=[\"tutorial_status\", \"onboarding_steps\"])\n", "path": "zerver/lib/hotspots.py"}]} | 1,871 | 143 |
gh_patches_debug_9750 | rasdani/github-patches | git_diff | nilearn__nilearn-4310 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] MultiNiftiMapsMasker reports are empty
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Operating system
- [X] Linux
- [ ] Mac
- [ ] Windows
### Operating system version
For example one of the following:
- Linux Ubuntu 22.04
### Python version
- [ ] 3.12
- [X] 3.11
- [ ] 3.10
- [ ] 3.9
- [ ] 3.8
### nilearn version
dev
### Expected behavior
To have non-empty reports.
### Current behavior & error messages
This is what I got:


### Steps and code to reproduce bug
```python
from pathlib import Path
from nilearn import datasets
from nilearn.maskers import (
MultiNiftiMapsMasker
)
REPORTS_DIR = Path().cwd()
difumo = datasets.fetch_atlas_difumo(
dimension=64, resolution_mm=2, legacy_format=False
)
data = datasets.fetch_development_fmri(n_subjects=2)
masker = MultiNiftiMapsMasker(
maps_img=difumo.maps,
standardize="zscore_sample",
standardize_confounds="zscore_sample",
memory="nilearn_cache",
n_jobs=2,
)
masker.fit()
report = masker.generate_report()
report.save_as_html(REPORTS_DIR / "multi_nifti_maps_masker_atlas.html")
_ = masker.fit_transform(data.func, confounds=data.confounds)
report = masker.generate_report()
report.save_as_html(REPORTS_DIR / "multi_nifti_maps_masker_fitted.html")
```
</issue>
<code>
[start of nilearn/reporting/html_report.py]
1 """Generate HTML reports."""
2
3 import copy
4 import os
5 import warnings
6 from pathlib import Path
7 from string import Template
8
9 from nilearn.externals import tempita
10 from nilearn.maskers import NiftiSpheresMasker
11 from nilearn.plotting.html_document import HTMLDocument
12 from nilearn.reporting.utils import figure_to_svg_base64
13
14 ESTIMATOR_TEMPLATES = {
15 "NiftiLabelsMasker": "report_body_template_niftilabelsmasker.html",
16 "NiftiMapsMasker": "report_body_template_niftimapsmasker.html",
17 "NiftiSpheresMasker": "report_body_template_niftispheresmasker.html",
18 "default": "report_body_template.html",
19 }
20
21
22 def _get_estimator_template(estimator):
23 """Return the HTML template to use for a given estimator \
24 if a specific template was defined in ESTIMATOR_TEMPLATES, \
25 otherwise return the default template.
26
27 Parameters
28 ----------
29 estimator : object instance of BaseEstimator
30 The object we wish to retrieve template of.
31
32 Returns
33 -------
34 template : str
35 Name of the template file to use.
36
37 """
38 if estimator.__class__.__name__ in ESTIMATOR_TEMPLATES:
39 return ESTIMATOR_TEMPLATES[estimator.__class__.__name__]
40 else:
41 return ESTIMATOR_TEMPLATES["default"]
42
43
44 def _embed_img(display):
45 """Embed an image or just return its instance if already embedded.
46
47 Parameters
48 ----------
49 display : obj
50 A Nilearn plotting object to display.
51
52 Returns
53 -------
54 embed : str
55 Binary image string.
56
57 """
58 if display is None: # no image to display
59 return None
60 # If already embedded, simply return as is
61 if isinstance(display, str):
62 return display
63 return figure_to_svg_base64(display.frame_axes.figure)
64
65
66 def _str_params(params):
67 """Convert NoneType values to the string 'None' for display.
68
69 Parameters
70 ----------
71 params : dict
72 A dictionary of input values to a function.
73
74 """
75 params_str = copy.deepcopy(params)
76 for k, v in params_str.items():
77 if v is None:
78 params_str[k] = "None"
79 return params_str
80
81
82 def _update_template(
83 title, docstring, content, overlay, parameters, data, template_name=None
84 ):
85 """Populate a report with content.
86
87 Parameters
88 ----------
89 title : str
90 The title for the report.
91
92 docstring : str
93 The introductory docstring for the reported object.
94
95 content : img
96 The content to display.
97
98 overlay : img
99 Overlaid content, to appear on hover.
100
101 parameters : dict
102 A dictionary of object parameters and their values.
103
104 data : dict
105 A dictionary holding the data to be added to the report.
106 The keys must match exactly the ones used in the template.
107 The default template accepts the following:
108 - description (str) : Description of the content.
109 - warning_message (str) : An optional warning
110 message to be displayed in red. This is used
111 for example when no image was provided to the
112 estimator when fitting.
113 The NiftiLabelsMasker template accepts the additional
114 fields:
115 - summary (dict) : A summary description of the
116 region labels and sizes. This will be displayed
117 as an expandable table in the report.
118
119 template_name : str, optional
120 The name of the template to use. If not provided, the
121 default template `report_body_template.html` will be
122 used.
123
124 Returns
125 -------
126 report : HTMLReport
127 An instance of a populated HTML report.
128
129 """
130 resource_path = Path(__file__).resolve().parent.joinpath("data", "html")
131
132 if template_name is None:
133 body_template_name = "report_body_template.html"
134 else:
135 body_template_name = template_name
136 body_template_path = resource_path.joinpath(body_template_name)
137 if not os.path.exists(str(body_template_path)):
138 raise FileNotFoundError(f"No template {body_template_name}")
139 tpl = tempita.HTMLTemplate.from_filename(
140 str(body_template_path), encoding="utf-8"
141 )
142 body = tpl.substitute(
143 title=title,
144 content=content,
145 overlay=overlay,
146 docstring=docstring,
147 parameters=parameters,
148 **data,
149 )
150
151 head_template_name = "report_head_template.html"
152 head_template_path = resource_path.joinpath(head_template_name)
153 with open(str(head_template_path)) as head_file:
154 head_tpl = Template(head_file.read())
155
156 return HTMLReport(body=body, head_tpl=head_tpl)
157
158
159 def _define_overlay(estimator):
160 """Determine whether an overlay was provided and \
161 update the report text as appropriate."""
162 displays = estimator._reporting()
163
164 if len(displays) == 1: # set overlay to None
165 overlay, image = None, displays[0]
166
167 elif isinstance(estimator, NiftiSpheresMasker):
168 overlay, image = None, displays
169
170 elif len(displays) == 2:
171 overlay, image = displays[0], displays[1]
172
173 else:
174 overlay, image = None, displays
175
176 return overlay, image
177
178
179 def generate_report(estimator):
180 """Generate a report for Nilearn objects.
181
182 Reports are useful to visualize steps in a processing pipeline.
183 Example use case: visualize the overlap of a mask and reference image
184 in NiftiMasker.
185
186 Parameters
187 ----------
188 estimator : Object instance of BaseEstimator.
189 Object for which the report should be generated.
190
191 Returns
192 -------
193 report : HTMLReport
194
195 """
196 if hasattr(estimator, "_report_content"):
197 data = estimator._report_content
198 else:
199 data = {}
200 if not hasattr(estimator, "_reporting_data"):
201 warnings.warn(
202 "This object has not been fitted yet ! "
203 "Make sure to run `fit` before inspecting reports."
204 )
205 return _update_template(
206 title="Empty Report",
207 docstring=(
208 "This report was not generated. Please `fit` the object."
209 ),
210 content=_embed_img(None),
211 overlay=None,
212 parameters={},
213 data=data,
214 )
215
216 elif estimator._reporting_data is None:
217 warnings.warn(
218 "Report generation not enabled ! "
219 "No visual outputs will be created."
220 )
221 return _update_template(
222 title="Empty Report",
223 docstring=(
224 "This report was not "
225 "generated. Please check "
226 "that reporting is enabled."
227 ),
228 content=_embed_img(None),
229 overlay=None,
230 parameters={},
231 data=data,
232 )
233
234 return _create_report(estimator, data)
235
236
237 def _create_report(estimator, data):
238 html_template = _get_estimator_template(estimator)
239 overlay, image = _define_overlay(estimator)
240 embeded_images = (
241 [_embed_img(i) for i in image]
242 if isinstance(image, list)
243 else _embed_img(image)
244 )
245 parameters = _str_params(estimator.get_params())
246 docstring = estimator.__doc__
247 snippet = docstring.partition("Parameters\n ----------\n")[0]
248 return _update_template(
249 title=estimator.__class__.__name__,
250 docstring=snippet,
251 content=embeded_images,
252 overlay=_embed_img(overlay),
253 parameters=parameters,
254 data=data,
255 template_name=html_template,
256 )
257
258
259 class HTMLReport(HTMLDocument):
260 """A report written as HTML.
261
262 Methods such as ``save_as_html``, or ``open_in_browser``
263 are inherited from class ``nilearn.plotting.html_document.HTMLDocument``.
264
265 """
266
267 def __init__(self, head_tpl, body, head_values={}):
268 """Construct the ``HTMLReport`` class.
269
270 Parameters
271 ----------
272 head_tpl : Template
273 This is meant for display as a full page, eg writing on disk.
274 This is the Template object used to generate the HTML head
275 section of the report. The template should be filled with:
276
277 - title: The title of the HTML page.
278 - body: The full body of the HTML page. Provided through
279 the ``body`` input.
280
281 body : :obj:`str`
282 This parameter is used for embedding in the provided
283 ``head_tpl`` template. It contains the full body of the
284 HTML page.
285
286 head_values : :obj:`dict`, default={}
287 Additional substitutions in ``head_tpl``.
288
289 .. note::
290 This can be used to provide additional values
291 with custom templates.
292
293 """
294 html = head_tpl.safe_substitute(body=body, **head_values)
295 super().__init__(html)
296 self.head_tpl = head_tpl
297 self.body = body
298
299 def _repr_html_(self):
300 """Return body of the report.
301
302 Method used by the Jupyter notebook.
303 Users normally won't call this method explicitly.
304 """
305 return self.body
306
307 def __str__(self):
308 return self.body
309
[end of nilearn/reporting/html_report.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nilearn/reporting/html_report.py b/nilearn/reporting/html_report.py
--- a/nilearn/reporting/html_report.py
+++ b/nilearn/reporting/html_report.py
@@ -13,7 +13,9 @@
ESTIMATOR_TEMPLATES = {
"NiftiLabelsMasker": "report_body_template_niftilabelsmasker.html",
+ "MultiNiftiLabelsMasker": "report_body_template_niftilabelsmasker.html",
"NiftiMapsMasker": "report_body_template_niftimapsmasker.html",
+ "MultiNiftiMapsMasker": "report_body_template_niftimapsmasker.html",
"NiftiSpheresMasker": "report_body_template_niftispheresmasker.html",
"default": "report_body_template.html",
}
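
The diff above maps the multi-subject maskers onto the same report body templates as their single-subject counterparts. A quick check of the lookup using `_get_estimator_template` from the module shown above; no imaging data is needed, since the function only inspects the class name:

```python
from nilearn.maskers import MultiNiftiMapsMasker, NiftiMapsMasker
from nilearn.reporting.html_report import _get_estimator_template

single = NiftiMapsMasker(maps_img=None)
multi = MultiNiftiMapsMasker(maps_img=None)

# Before the fix the second lookup fell back to "report_body_template.html",
# which is why the generated reports came out empty.
print(_get_estimator_template(single))  # report_body_template_niftimapsmasker.html
print(_get_estimator_template(multi))   # report_body_template_niftimapsmasker.html
```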
| {"golden_diff": "diff --git a/nilearn/reporting/html_report.py b/nilearn/reporting/html_report.py\n--- a/nilearn/reporting/html_report.py\n+++ b/nilearn/reporting/html_report.py\n@@ -13,7 +13,9 @@\n \n ESTIMATOR_TEMPLATES = {\n \"NiftiLabelsMasker\": \"report_body_template_niftilabelsmasker.html\",\n+ \"MultiNiftiLabelsMasker\": \"report_body_template_niftilabelsmasker.html\",\n \"NiftiMapsMasker\": \"report_body_template_niftimapsmasker.html\",\n+ \"MultiNiftiMapsMasker\": \"report_body_template_niftimapsmasker.html\",\n \"NiftiSpheresMasker\": \"report_body_template_niftispheresmasker.html\",\n \"default\": \"report_body_template.html\",\n }\n", "issue": "[BUG] MultiNiftiMapsMasker reports are empty\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Operating system\n\n- [X] Linux\n- [ ] Mac\n- [ ] Windows\n\n### Operating system version\n\nFor example one of the following:\r\n- Linux Ubuntu 22.04\r\n\r\n\n\n### Python version\n\n- [ ] 3.12\n- [X] 3.11\n- [ ] 3.10\n- [ ] 3.9\n- [ ] 3.8\n\n### nilearn version\n\ndev\n\n### Expected behavior\n\nto have non empty reports\n\n### Current behavior & error messages\n\nThis is what I got:\r\n\r\n\r\n\r\n\r\n\r\n\n\n### Steps and code to reproduce bug\n\n```python\r\nfrom pathlib import Path\r\nfrom nilearn import datasets\r\nfrom nilearn.maskers import (\r\n MultiNiftiMapsMasker\r\n)\r\n\r\nREPORTS_DIR = Path().cwd()\r\n\r\ndifumo = datasets.fetch_atlas_difumo(\r\n dimension=64, resolution_mm=2, legacy_format=False\r\n)\r\n\r\ndata = datasets.fetch_development_fmri(n_subjects=2)\r\n\r\nmasker = MultiNiftiMapsMasker(\r\n maps_img=difumo.maps,\r\n standardize=\"zscore_sample\",\r\n standardize_confounds=\"zscore_sample\",\r\n memory=\"nilearn_cache\",\r\n n_jobs=2,\r\n)\r\n\r\nmasker.fit()\r\nreport = masker.generate_report()\r\nreport.save_as_html(REPORTS_DIR / \"multi_nifti_maps_masker_atlas.html\")\r\n\r\n_ = masker.fit_transform(data.func, confounds=data.confounds)\r\nreport = masker.generate_report()\r\nreport.save_as_html(REPORTS_DIR / \"multi_nifti_maps_masker_fitted.html\")\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Generate HTML reports.\"\"\"\n\nimport copy\nimport os\nimport warnings\nfrom pathlib import Path\nfrom string import Template\n\nfrom nilearn.externals import tempita\nfrom nilearn.maskers import NiftiSpheresMasker\nfrom nilearn.plotting.html_document import HTMLDocument\nfrom nilearn.reporting.utils import figure_to_svg_base64\n\nESTIMATOR_TEMPLATES = {\n \"NiftiLabelsMasker\": \"report_body_template_niftilabelsmasker.html\",\n \"NiftiMapsMasker\": \"report_body_template_niftimapsmasker.html\",\n \"NiftiSpheresMasker\": \"report_body_template_niftispheresmasker.html\",\n \"default\": \"report_body_template.html\",\n}\n\n\ndef _get_estimator_template(estimator):\n \"\"\"Return the HTML template to use for a given estimator \\\n if a specific template was defined in ESTIMATOR_TEMPLATES, \\\n otherwise return the default template.\n\n Parameters\n ----------\n estimator : object instance of BaseEstimator\n The object we wish to retrieve template of.\n\n Returns\n -------\n template : str\n Name of the template file to use.\n\n \"\"\"\n if estimator.__class__.__name__ in ESTIMATOR_TEMPLATES:\n return ESTIMATOR_TEMPLATES[estimator.__class__.__name__]\n else:\n return ESTIMATOR_TEMPLATES[\"default\"]\n\n\ndef _embed_img(display):\n \"\"\"Embed an image or just return its instance if already embedded.\n\n Parameters\n ----------\n display : obj\n A Nilearn plotting object to 
display.\n\n Returns\n -------\n embed : str\n Binary image string.\n\n \"\"\"\n if display is None: # no image to display\n return None\n # If already embedded, simply return as is\n if isinstance(display, str):\n return display\n return figure_to_svg_base64(display.frame_axes.figure)\n\n\ndef _str_params(params):\n \"\"\"Convert NoneType values to the string 'None' for display.\n\n Parameters\n ----------\n params : dict\n A dictionary of input values to a function.\n\n \"\"\"\n params_str = copy.deepcopy(params)\n for k, v in params_str.items():\n if v is None:\n params_str[k] = \"None\"\n return params_str\n\n\ndef _update_template(\n title, docstring, content, overlay, parameters, data, template_name=None\n):\n \"\"\"Populate a report with content.\n\n Parameters\n ----------\n title : str\n The title for the report.\n\n docstring : str\n The introductory docstring for the reported object.\n\n content : img\n The content to display.\n\n overlay : img\n Overlaid content, to appear on hover.\n\n parameters : dict\n A dictionary of object parameters and their values.\n\n data : dict\n A dictionary holding the data to be added to the report.\n The keys must match exactly the ones used in the template.\n The default template accepts the following:\n - description (str) : Description of the content.\n - warning_message (str) : An optional warning\n message to be displayed in red. This is used\n for example when no image was provided to the\n estimator when fitting.\n The NiftiLabelsMasker template accepts the additional\n fields:\n - summary (dict) : A summary description of the\n region labels and sizes. This will be displayed\n as an expandable table in the report.\n\n template_name : str, optional\n The name of the template to use. If not provided, the\n default template `report_body_template.html` will be\n used.\n\n Returns\n -------\n report : HTMLReport\n An instance of a populated HTML report.\n\n \"\"\"\n resource_path = Path(__file__).resolve().parent.joinpath(\"data\", \"html\")\n\n if template_name is None:\n body_template_name = \"report_body_template.html\"\n else:\n body_template_name = template_name\n body_template_path = resource_path.joinpath(body_template_name)\n if not os.path.exists(str(body_template_path)):\n raise FileNotFoundError(f\"No template {body_template_name}\")\n tpl = tempita.HTMLTemplate.from_filename(\n str(body_template_path), encoding=\"utf-8\"\n )\n body = tpl.substitute(\n title=title,\n content=content,\n overlay=overlay,\n docstring=docstring,\n parameters=parameters,\n **data,\n )\n\n head_template_name = \"report_head_template.html\"\n head_template_path = resource_path.joinpath(head_template_name)\n with open(str(head_template_path)) as head_file:\n head_tpl = Template(head_file.read())\n\n return HTMLReport(body=body, head_tpl=head_tpl)\n\n\ndef _define_overlay(estimator):\n \"\"\"Determine whether an overlay was provided and \\\n update the report text as appropriate.\"\"\"\n displays = estimator._reporting()\n\n if len(displays) == 1: # set overlay to None\n overlay, image = None, displays[0]\n\n elif isinstance(estimator, NiftiSpheresMasker):\n overlay, image = None, displays\n\n elif len(displays) == 2:\n overlay, image = displays[0], displays[1]\n\n else:\n overlay, image = None, displays\n\n return overlay, image\n\n\ndef generate_report(estimator):\n \"\"\"Generate a report for Nilearn objects.\n\n Reports are useful to visualize steps in a processing pipeline.\n Example use case: visualize the overlap of a mask and reference image\n in 
NiftiMasker.\n\n Parameters\n ----------\n estimator : Object instance of BaseEstimator.\n Object for which the report should be generated.\n\n Returns\n -------\n report : HTMLReport\n\n \"\"\"\n if hasattr(estimator, \"_report_content\"):\n data = estimator._report_content\n else:\n data = {}\n if not hasattr(estimator, \"_reporting_data\"):\n warnings.warn(\n \"This object has not been fitted yet ! \"\n \"Make sure to run `fit` before inspecting reports.\"\n )\n return _update_template(\n title=\"Empty Report\",\n docstring=(\n \"This report was not generated. Please `fit` the object.\"\n ),\n content=_embed_img(None),\n overlay=None,\n parameters={},\n data=data,\n )\n\n elif estimator._reporting_data is None:\n warnings.warn(\n \"Report generation not enabled ! \"\n \"No visual outputs will be created.\"\n )\n return _update_template(\n title=\"Empty Report\",\n docstring=(\n \"This report was not \"\n \"generated. Please check \"\n \"that reporting is enabled.\"\n ),\n content=_embed_img(None),\n overlay=None,\n parameters={},\n data=data,\n )\n\n return _create_report(estimator, data)\n\n\ndef _create_report(estimator, data):\n html_template = _get_estimator_template(estimator)\n overlay, image = _define_overlay(estimator)\n embeded_images = (\n [_embed_img(i) for i in image]\n if isinstance(image, list)\n else _embed_img(image)\n )\n parameters = _str_params(estimator.get_params())\n docstring = estimator.__doc__\n snippet = docstring.partition(\"Parameters\\n ----------\\n\")[0]\n return _update_template(\n title=estimator.__class__.__name__,\n docstring=snippet,\n content=embeded_images,\n overlay=_embed_img(overlay),\n parameters=parameters,\n data=data,\n template_name=html_template,\n )\n\n\nclass HTMLReport(HTMLDocument):\n \"\"\"A report written as HTML.\n\n Methods such as ``save_as_html``, or ``open_in_browser``\n are inherited from class ``nilearn.plotting.html_document.HTMLDocument``.\n\n \"\"\"\n\n def __init__(self, head_tpl, body, head_values={}):\n \"\"\"Construct the ``HTMLReport`` class.\n\n Parameters\n ----------\n head_tpl : Template\n This is meant for display as a full page, eg writing on disk.\n This is the Template object used to generate the HTML head\n section of the report. The template should be filled with:\n\n - title: The title of the HTML page.\n - body: The full body of the HTML page. Provided through\n the ``body`` input.\n\n body : :obj:`str`\n This parameter is used for embedding in the provided\n ``head_tpl`` template. It contains the full body of the\n HTML page.\n\n head_values : :obj:`dict`, default={}\n Additional substitutions in ``head_tpl``.\n\n .. note::\n This can be used to provide additional values\n with custom templates.\n\n \"\"\"\n html = head_tpl.safe_substitute(body=body, **head_values)\n super().__init__(html)\n self.head_tpl = head_tpl\n self.body = body\n\n def _repr_html_(self):\n \"\"\"Return body of the report.\n\n Method used by the Jupyter notebook.\n Users normally won't call this method explicitly.\n \"\"\"\n return self.body\n\n def __str__(self):\n return self.body\n", "path": "nilearn/reporting/html_report.py"}]} | 3,836 | 188 |
gh_patches_debug_27721 | rasdani/github-patches | git_diff | Flexget__Flexget-2513 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Notify (Pushbullet) Plugin Error.
Hi, since last week I have been getting this error in my log. Up to this point everything worked without problems.
```
2019-11-13 10:30 ERROR notify_entry NexBox 'x-ratelimit-reset'
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/flexget/components/notify/notify.py", line 104, in send_notification
send_notification(*args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/flexget/components/notify/notification_framework.py", line 124, in send_notification
title, message, rendered_config
File "/usr/local/lib/python3.5/dist-packages/flexget/components/notify/notifiers/pushbullet.py", line 89, in notify
self.send_push(key, title, message, config.get('url'), d, 'device_iden')
File "/usr/local/lib/python3.5/dist-packages/flexget/components/notify/notifiers/pushbullet.py", line 130, in send_push
int(response.headers['X-Ratelimit-Reset'])
File "/usr/local/lib/python3.5/dist-packages/requests/structures.py", line 52, in __getitem__
return self._store[key.lower()][1]
KeyError: 'x-ratelimit-reset'
```
Flexget: 2.21.32
API: 1.5.0
Same with Flexget 3.0.8 and Python 3.8
</issue>
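A minimal sketch, not the project's code, of the defensive header handling the traceback points at: Pushbullet sometimes omits the `X-Ratelimit-*` headers, so they should be read with `.get()` rather than indexed. `response` is assumed to be a `requests` response object and `log` a standard logger.

```python
import datetime

def log_rate_limit(response, log):
    # Read the rate-limit headers defensively; indexing response.headers
    # directly is what raised the KeyError in the report above.
    reset = response.headers.get('X-Ratelimit-Reset')
    remaining = response.headers.get('X-Ratelimit-Remaining')
    if reset and remaining:
        reset_time = datetime.datetime.fromtimestamp(int(reset))
        log.debug('Pushbullet operations remaining: %s, next reset: %s',
                  remaining, reset_time)
```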
<code>
[start of flexget/components/notify/notifiers/pushbullet.py]
1 import base64
2 import datetime
3 import logging
4
5 from requests.exceptions import RequestException
6
7 from flexget import plugin
8 from flexget.config_schema import one_or_more
9 from flexget.event import event
10 from flexget.plugin import PluginWarning
11 from flexget.utils.requests import Session as RequestSession
12 from flexget.utils.requests import TimedLimiter
13
14 plugin_name = 'pushbullet'
15 log = logging.getLogger(plugin_name)
16
17 PUSHBULLET_URL = 'https://api.pushbullet.com/v2/pushes'
18
19 requests = RequestSession(max_retries=3)
20 requests.add_domain_limiter(TimedLimiter('pushbullet.com', '5 seconds'))
21
22
23 class PushbulletNotifier:
24 """
25 Example::
26
27 notify:
28 entries:
29 via:
30 pushbullet:
31 apikey: <API_KEY>
32 [device: <DEVICE_IDEN> (can also be a list of device ids, or don't specify any ids to send to all devices)]
33 [email: <EMAIL_ADDRESS> (can also be a list of user email addresses)]
34 [channel: <CHANNEL_TAG> (you can only specify device / email or channel tag, cannot use both)]
35
36 Configuration parameters are also supported from entries (eg. through set).
37 """
38
39 schema = {
40 'type': 'object',
41 'properties': {
42 'api_key': one_or_more({'type': 'string'}),
43 'device': one_or_more({'type': 'string'}),
44 'email': one_or_more({'type': 'string', 'format': 'email'}),
45 'url': {'type': 'string'},
46 'channel': {'type': 'string'},
47 'file_template': {'type': 'string'},
48 },
49 'required': ['api_key'],
50 'oneOf': [
51 {'required': ['device']},
52 {'required': ['channel']},
53 {'required': ['email']},
54 {
55 'not': {
56 'anyOf': [
57 {'required': ['device']},
58 {'required': ['channel']},
59 {'required': ['email']},
60 ]
61 }
62 },
63 ],
64 'error_oneOf': 'One (and only one) of `email`, `device` or `channel` are allowed.',
65 'additionalProperties': False,
66 }
67
68 def notify(self, title, message, config):
69 """
70 Send a Pushbullet notification
71 """
72 if config.get('device') and not isinstance(config['device'], list):
73 config['device'] = [config['device']]
74
75 if config.get('email') and not isinstance(config['email'], list):
76 config['email'] = [config['email']]
77
78 if not isinstance(config['api_key'], list):
79 config['api_key'] = [config['api_key']]
80
81 for key in config['api_key']:
82 if config.get('channel'):
83 self.send_push(
84 key, title, message, config.get('url'), config.get('channel'), 'channel_tag'
85 )
86 elif config.get('device'):
87 for d in config['device']:
88 self.send_push(key, title, message, config.get('url'), d, 'device_iden')
89 elif config.get('email'):
90 for e in config['email']:
91 self.send_push(key, title, message, config.get('url'), e, 'email')
92 else:
93 self.send_push(key, title, message, config.get('url'))
94
95 def send_push(self, api_key, title, body, url=None, destination=None, destination_type=None):
96 push_type = 'link' if url else 'note'
97 notification = {'type': push_type, 'title': title, 'body': body}
98 if url:
99 notification['url'] = url
100 if destination:
101 notification[destination_type] = destination
102
103 # Make the request
104 headers = {
105 'Authorization': b'Basic ' + base64.b64encode(api_key.encode('ascii')),
106 'Content-Type': 'application/json',
107 'Accept': 'application/json',
108 'User-Agent': 'Flexget',
109 }
110 try:
111 response = requests.post(PUSHBULLET_URL, headers=headers, json=notification)
112 except RequestException as e:
113 if e.response is not None:
114 if e.response.status_code == 429:
115 reset_time = e.response.headers.get('X-Ratelimit-Reset')
116 if reset_time:
117 reset_time = datetime.datetime.fromtimestamp(int(reset_time)).strftime(
118 '%Y-%m-%d %H:%M:%S'
119 )
120 message = f'Monthly Pushbullet database operations limit reached. Next reset: {reset_time}'
121 else:
122 message = e.response.json()['error']['message']
123 else:
124 message = str(e)
125 raise PluginWarning(message)
126
127 reset_time = datetime.datetime.fromtimestamp(
128 int(response.headers['X-Ratelimit-Reset'])
129 ).strftime('%Y-%m-%d %H:%M:%S')
130 remaining = response.headers['X-Ratelimit-Remaining']
131 log.debug(
132 'Pushbullet notification sent. Database operations remaining until next reset: %s. '
133 'Next reset at: %s',
134 remaining,
135 reset_time,
136 )
137
138
139 @event('plugin.register')
140 def register_plugin():
141 plugin.register(PushbulletNotifier, plugin_name, api_ver=2, interfaces=['notifiers'])
142
[end of flexget/components/notify/notifiers/pushbullet.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flexget/components/notify/notifiers/pushbullet.py b/flexget/components/notify/notifiers/pushbullet.py
--- a/flexget/components/notify/notifiers/pushbullet.py
+++ b/flexget/components/notify/notifiers/pushbullet.py
@@ -92,7 +92,8 @@
else:
self.send_push(key, title, message, config.get('url'))
- def send_push(self, api_key, title, body, url=None, destination=None, destination_type=None):
+ @staticmethod
+ def send_push(api_key, title, body, url=None, destination=None, destination_type=None):
push_type = 'link' if url else 'note'
notification = {'type': push_type, 'title': title, 'body': body}
if url:
@@ -124,16 +125,16 @@
message = str(e)
raise PluginWarning(message)
- reset_time = datetime.datetime.fromtimestamp(
- int(response.headers['X-Ratelimit-Reset'])
- ).strftime('%Y-%m-%d %H:%M:%S')
- remaining = response.headers['X-Ratelimit-Remaining']
- log.debug(
- 'Pushbullet notification sent. Database operations remaining until next reset: %s. '
- 'Next reset at: %s',
- remaining,
- reset_time,
- )
+ reset_time = response.headers.get('X-Ratelimit-Reset')
+ remaining = response.headers.get('X-Ratelimit-Remaining')
+ if reset_time and remaining:
+ reset_time = datetime.datetime.fromtimestamp(int(reset_time))
+ log.debug(
+ 'Pushbullet notification sent. Database operations remaining until next reset: %s. '
+ 'Next reset at: %s',
+ remaining,
+ reset_time,
+ )
@event('plugin.register')
| {"golden_diff": "diff --git a/flexget/components/notify/notifiers/pushbullet.py b/flexget/components/notify/notifiers/pushbullet.py\n--- a/flexget/components/notify/notifiers/pushbullet.py\n+++ b/flexget/components/notify/notifiers/pushbullet.py\n@@ -92,7 +92,8 @@\n else:\n self.send_push(key, title, message, config.get('url'))\n \n- def send_push(self, api_key, title, body, url=None, destination=None, destination_type=None):\n+ @staticmethod\n+ def send_push(api_key, title, body, url=None, destination=None, destination_type=None):\n push_type = 'link' if url else 'note'\n notification = {'type': push_type, 'title': title, 'body': body}\n if url:\n@@ -124,16 +125,16 @@\n message = str(e)\n raise PluginWarning(message)\n \n- reset_time = datetime.datetime.fromtimestamp(\n- int(response.headers['X-Ratelimit-Reset'])\n- ).strftime('%Y-%m-%d %H:%M:%S')\n- remaining = response.headers['X-Ratelimit-Remaining']\n- log.debug(\n- 'Pushbullet notification sent. Database operations remaining until next reset: %s. '\n- 'Next reset at: %s',\n- remaining,\n- reset_time,\n- )\n+ reset_time = response.headers.get('X-Ratelimit-Reset')\n+ remaining = response.headers.get('X-Ratelimit-Remaining')\n+ if reset_time and remaining:\n+ reset_time = datetime.datetime.fromtimestamp(int(reset_time))\n+ log.debug(\n+ 'Pushbullet notification sent. Database operations remaining until next reset: %s. '\n+ 'Next reset at: %s',\n+ remaining,\n+ reset_time,\n+ )\n \n \n @event('plugin.register')\n", "issue": "Notify (Pushbullet) Plugin Error.\nHi, since last week i have this error in my log. Up to this point everything worked without problems.\r\n```\r\n2019-11-13 10:30 ERROR notify_entry NexBox 'x-ratelimit-reset'\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.5/dist-packages/flexget/components/notify/notify.py\", line 104, in send_notification\r\n send_notification(*args, **kwargs)\r\n File \"/usr/local/lib/python3.5/dist-packages/flexget/components/notify/notification_framework.py\", line 124, in send_notification\r\n title, message, rendered_config\r\n File \"/usr/local/lib/python3.5/dist-packages/flexget/components/notify/notifiers/pushbullet.py\", line 89, in notify\r\n self.send_push(key, title, message, config.get('url'), d, 'device_iden')\r\n File \"/usr/local/lib/python3.5/dist-packages/flexget/components/notify/notifiers/pushbullet.py\", line 130, in send_push\r\n int(response.headers['X-Ratelimit-Reset'])\r\n File \"/usr/local/lib/python3.5/dist-packages/requests/structures.py\", line 52, in __getitem__\r\n return self._store[key.lower()][1]\r\nKeyError: 'x-ratelimit-reset'\r\n```\r\nFlexget: 2.21.32\r\nAPI: 1.5.0\r\n\r\nSame with Flexget 3.0.8 and Python 3.8\n", "before_files": [{"content": "import base64\nimport datetime\nimport logging\n\nfrom requests.exceptions import RequestException\n\nfrom flexget import plugin\nfrom flexget.config_schema import one_or_more\nfrom flexget.event import event\nfrom flexget.plugin import PluginWarning\nfrom flexget.utils.requests import Session as RequestSession\nfrom flexget.utils.requests import TimedLimiter\n\nplugin_name = 'pushbullet'\nlog = logging.getLogger(plugin_name)\n\nPUSHBULLET_URL = 'https://api.pushbullet.com/v2/pushes'\n\nrequests = RequestSession(max_retries=3)\nrequests.add_domain_limiter(TimedLimiter('pushbullet.com', '5 seconds'))\n\n\nclass PushbulletNotifier:\n \"\"\"\n Example::\n\n notify:\n entries:\n via:\n pushbullet:\n apikey: <API_KEY>\n [device: <DEVICE_IDEN> (can also be a list of device ids, or don't specify any 
ids to send to all devices)]\n [email: <EMAIL_ADDRESS> (can also be a list of user email addresses)]\n [channel: <CHANNEL_TAG> (you can only specify device / email or channel tag, cannot use both)]\n\n Configuration parameters are also supported from entries (eg. through set).\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'api_key': one_or_more({'type': 'string'}),\n 'device': one_or_more({'type': 'string'}),\n 'email': one_or_more({'type': 'string', 'format': 'email'}),\n 'url': {'type': 'string'},\n 'channel': {'type': 'string'},\n 'file_template': {'type': 'string'},\n },\n 'required': ['api_key'],\n 'oneOf': [\n {'required': ['device']},\n {'required': ['channel']},\n {'required': ['email']},\n {\n 'not': {\n 'anyOf': [\n {'required': ['device']},\n {'required': ['channel']},\n {'required': ['email']},\n ]\n }\n },\n ],\n 'error_oneOf': 'One (and only one) of `email`, `device` or `channel` are allowed.',\n 'additionalProperties': False,\n }\n\n def notify(self, title, message, config):\n \"\"\"\n Send a Pushbullet notification\n \"\"\"\n if config.get('device') and not isinstance(config['device'], list):\n config['device'] = [config['device']]\n\n if config.get('email') and not isinstance(config['email'], list):\n config['email'] = [config['email']]\n\n if not isinstance(config['api_key'], list):\n config['api_key'] = [config['api_key']]\n\n for key in config['api_key']:\n if config.get('channel'):\n self.send_push(\n key, title, message, config.get('url'), config.get('channel'), 'channel_tag'\n )\n elif config.get('device'):\n for d in config['device']:\n self.send_push(key, title, message, config.get('url'), d, 'device_iden')\n elif config.get('email'):\n for e in config['email']:\n self.send_push(key, title, message, config.get('url'), e, 'email')\n else:\n self.send_push(key, title, message, config.get('url'))\n\n def send_push(self, api_key, title, body, url=None, destination=None, destination_type=None):\n push_type = 'link' if url else 'note'\n notification = {'type': push_type, 'title': title, 'body': body}\n if url:\n notification['url'] = url\n if destination:\n notification[destination_type] = destination\n\n # Make the request\n headers = {\n 'Authorization': b'Basic ' + base64.b64encode(api_key.encode('ascii')),\n 'Content-Type': 'application/json',\n 'Accept': 'application/json',\n 'User-Agent': 'Flexget',\n }\n try:\n response = requests.post(PUSHBULLET_URL, headers=headers, json=notification)\n except RequestException as e:\n if e.response is not None:\n if e.response.status_code == 429:\n reset_time = e.response.headers.get('X-Ratelimit-Reset')\n if reset_time:\n reset_time = datetime.datetime.fromtimestamp(int(reset_time)).strftime(\n '%Y-%m-%d %H:%M:%S'\n )\n message = f'Monthly Pushbullet database operations limit reached. Next reset: {reset_time}'\n else:\n message = e.response.json()['error']['message']\n else:\n message = str(e)\n raise PluginWarning(message)\n\n reset_time = datetime.datetime.fromtimestamp(\n int(response.headers['X-Ratelimit-Reset'])\n ).strftime('%Y-%m-%d %H:%M:%S')\n remaining = response.headers['X-Ratelimit-Remaining']\n log.debug(\n 'Pushbullet notification sent. Database operations remaining until next reset: %s. '\n 'Next reset at: %s',\n remaining,\n reset_time,\n )\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(PushbulletNotifier, plugin_name, api_ver=2, interfaces=['notifiers'])\n", "path": "flexget/components/notify/notifiers/pushbullet.py"}]} | 2,330 | 413 |
gh_patches_debug_5182 | rasdani/github-patches | git_diff | Gallopsled__pwntools-1852 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`atexit.register` does not work
### What I did
```python3
from pwn import *
atexit.register(print, "hello world")
exit()
```
### What I expected to see
```
$ python3 test.py
hello world
```
### What I saw
Nothing
I noticed this because `asm()`, which adds an `atexit` handler to remove the `/tmp/pwn-asm-XXXXXX` folder, does not in fact remove it, meaning multiple script runs lead to many such leftover folders.
</issue>
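For context: `sys.exitfunc` was a Python 2-only hook that CPython 3 never calls, so assigning it (as the module below does on its last line) registers nothing on Python 3. A minimal sketch of one way to keep both interpreters working, assuming the standard `atexit` module is an acceptable fallback; this is an illustration, not the project's confirmed change.

```python
import sys
import atexit as std_atexit

def _run_handlers():
    # stand-in for the pwnlib handler runner defined in the file below
    pass

if sys.version_info[0] < 3:
    sys.exitfunc = _run_handlers          # only Python 2 honours this hook
else:
    std_atexit.register(_run_handlers)    # Python 3 removed sys.exitfunc
```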
<code>
[start of pwnlib/atexit.py]
1 """
2 Replacement for the Python standard library's atexit.py.
3
4 Whereas the standard :mod:`atexit` module only defines :func:`atexit.register`,
5 this replacement module also defines :func:`unregister`.
6
7 This module also fixes a the issue that exceptions raised by an exit handler is
8 printed twice when the standard :mod:`atexit` is used.
9 """
10 from __future__ import absolute_import
11 from __future__ import division
12
13 import sys
14 import threading
15 import traceback
16
17 from pwnlib.context import context
18
19 __all__ = ['register', 'unregister']
20
21 _lock = threading.Lock()
22 _ident = 0
23 _handlers = {}
24
25 def register(func, *args, **kwargs):
26 """register(func, *args, **kwargs)
27
28 Registers a function to be called on program termination. The function will
29 be called with positional arguments `args` and keyword arguments `kwargs`,
30 i.e. ``func(*args, **kwargs)``. The current `context` is recorded and will
31 be the one used when the handler is run.
32
33 E.g. to suppress logging output from an exit-handler one could write::
34
35 with context.local(log_level = 'error'):
36 atexit.register(handler)
37
38 An identifier is returned which can be used to unregister the exit-handler.
39
40 This function can be used as a decorator::
41
42 @atexit.register
43 def handler():
44 ...
45
46 Notice however that this will bind ``handler`` to the identifier and not the
47 actual exit-handler. The exit-handler can then be unregistered with::
48
49 atexit.unregister(handler)
50
51 This function is thread safe.
52
53 """
54 global _ident
55 with _lock:
56 ident = _ident
57 _ident += 1
58 _handlers[ident] = (func, args, kwargs, vars(context))
59 return ident
60
61 def unregister(ident):
62 """unregister(ident)
63
64 Remove the exit-handler identified by `ident` from the list of registered
65 handlers. If `ident` isn't registered this is a no-op.
66 """
67 if ident in _handlers:
68 del _handlers[ident]
69
70 def _run_handlers():
71 """_run_handlers()
72
73 Run registered exit-handlers. They run in the reverse order of which they
74 were registered.
75
76 If a handler raises an exception, it will be printed but nothing else
77 happens, i.e. other handlers will be run and `sys.excepthook` will not be
78 called for that reason.
79 """
80 context.clear()
81 for _ident, (func, args, kwargs, ctx) in \
82 sorted(_handlers.items(), reverse = True):
83 try:
84 with context.local(**ctx):
85 func(*args, **kwargs)
86 except SystemExit:
87 pass
88 except Exception:
89 # extract the current exception and rewind the traceback to where it
90 # originated
91 typ, val, tb = sys.exc_info()
92 traceback.print_exception(typ, val, tb.tb_next)
93
94 # if there's already an exitfunc registered be sure to run that too
95 if hasattr(sys, "exitfunc"):
96 register(sys.exitfunc)
97
98 sys.exitfunc = _run_handlers
99
[end of pwnlib/atexit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwnlib/atexit.py b/pwnlib/atexit.py
--- a/pwnlib/atexit.py
+++ b/pwnlib/atexit.py
@@ -13,6 +13,7 @@
import sys
import threading
import traceback
+import atexit as std_atexit
from pwnlib.context import context
@@ -95,4 +96,8 @@
if hasattr(sys, "exitfunc"):
register(sys.exitfunc)
-sys.exitfunc = _run_handlers
+if sys.version_info[0] < 3:
+ sys.exitfunc = _run_handlers
+else:
+ std_atexit.register(_run_handlers)
+
| {"golden_diff": "diff --git a/pwnlib/atexit.py b/pwnlib/atexit.py\n--- a/pwnlib/atexit.py\n+++ b/pwnlib/atexit.py\n@@ -13,6 +13,7 @@\n import sys\n import threading\n import traceback\n+import atexit as std_atexit\n \n from pwnlib.context import context\n \n@@ -95,4 +96,8 @@\n if hasattr(sys, \"exitfunc\"):\n register(sys.exitfunc)\n \n-sys.exitfunc = _run_handlers\n+if sys.version_info[0] < 3:\n+ sys.exitfunc = _run_handlers\n+else:\n+ std_atexit.register(_run_handlers)\n+\n", "issue": "`atexit.register` does not work\n### What I did\r\n```python3\r\nfrom pwn import *\r\natexit.register(print, \"hello world\")\r\nexit()\r\n```\r\n### What I expected to see\r\n```python3 test.py\r\nhello world\r\n```\r\n### What I saw\r\nNothing\r\n\r\nI noticed this because `asm()`, which adds an `atexit` handler to remove the `/tmp/pwn-asm-XXXXXX` folder, does not in fact remove it, meaning multiple script runs leads to many similar folders.\n", "before_files": [{"content": "\"\"\"\nReplacement for the Python standard library's atexit.py.\n\nWhereas the standard :mod:`atexit` module only defines :func:`atexit.register`,\nthis replacement module also defines :func:`unregister`.\n\nThis module also fixes a the issue that exceptions raised by an exit handler is\nprinted twice when the standard :mod:`atexit` is used.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport sys\nimport threading\nimport traceback\n\nfrom pwnlib.context import context\n\n__all__ = ['register', 'unregister']\n\n_lock = threading.Lock()\n_ident = 0\n_handlers = {}\n\ndef register(func, *args, **kwargs):\n \"\"\"register(func, *args, **kwargs)\n\n Registers a function to be called on program termination. The function will\n be called with positional arguments `args` and keyword arguments `kwargs`,\n i.e. ``func(*args, **kwargs)``. The current `context` is recorded and will\n be the one used when the handler is run.\n\n E.g. to suppress logging output from an exit-handler one could write::\n\n with context.local(log_level = 'error'):\n atexit.register(handler)\n\n An identifier is returned which can be used to unregister the exit-handler.\n\n This function can be used as a decorator::\n\n @atexit.register\n def handler():\n ...\n\n Notice however that this will bind ``handler`` to the identifier and not the\n actual exit-handler. The exit-handler can then be unregistered with::\n\n atexit.unregister(handler)\n\n This function is thread safe.\n\n \"\"\"\n global _ident\n with _lock:\n ident = _ident\n _ident += 1\n _handlers[ident] = (func, args, kwargs, vars(context))\n return ident\n\ndef unregister(ident):\n \"\"\"unregister(ident)\n\n Remove the exit-handler identified by `ident` from the list of registered\n handlers. If `ident` isn't registered this is a no-op.\n \"\"\"\n if ident in _handlers:\n del _handlers[ident]\n\ndef _run_handlers():\n \"\"\"_run_handlers()\n\n Run registered exit-handlers. They run in the reverse order of which they\n were registered.\n\n If a handler raises an exception, it will be printed but nothing else\n happens, i.e. 
other handlers will be run and `sys.excepthook` will not be\n called for that reason.\n \"\"\"\n context.clear()\n for _ident, (func, args, kwargs, ctx) in \\\n sorted(_handlers.items(), reverse = True):\n try:\n with context.local(**ctx):\n func(*args, **kwargs)\n except SystemExit:\n pass\n except Exception:\n # extract the current exception and rewind the traceback to where it\n # originated\n typ, val, tb = sys.exc_info()\n traceback.print_exception(typ, val, tb.tb_next)\n\n# if there's already an exitfunc registered be sure to run that too\nif hasattr(sys, \"exitfunc\"):\n register(sys.exitfunc)\n\nsys.exitfunc = _run_handlers\n", "path": "pwnlib/atexit.py"}]} | 1,518 | 146 |
gh_patches_debug_20723 | rasdani/github-patches | git_diff | streamlit__streamlit-1737 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
st.write throws value is null when string is too big
# Summary
calling `st.write` with a string that is too big will result in an error message on the front end with the message "value is null"
<img width="551" alt="Screen Shot 2019-10-16 at 3 57 57 PM" src="https://user-images.githubusercontent.com/934511/66950384-a17d0e00-f02e-11e9-87bb-1df7158794b5.png">
# Steps to reproduce
1. create a script with a variable that holds a string that weighs more than 50MB
2. `streamlit run yourscript.py`
3. the error message will appear on the front end
## Expected behavior:
The error message should be clearer.
## Actual behavior:
The error message does not point to the string length constraint of `st.write`.
</issue>
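The underlying cause is a hard 50MB cap on a serialized ForwardMsg; the report is about the opaque "value is null" wording rather than the cap itself. A minimal sketch, with an illustrative function name, of an error that states the measured size against the limit:

```python
MESSAGE_SIZE_LIMIT = 50 * 1e6  # 50MB, mirroring the constant in server_util.py

def check_message_size(msg_str: bytes) -> None:
    # Fail with a message that names both the payload size and the limit.
    if len(msg_str) > MESSAGE_SIZE_LIMIT:
        raise RuntimeError(
            f"Data of size {len(msg_str) / 1e6:.1f}MB exceeds the write limit "
            f"of {MESSAGE_SIZE_LIMIT / 1e6:.0f}MB"
        )
```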
<code>
[start of lib/streamlit/server/server_util.py]
1 # Copyright 2018-2020 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Server related utility functions"""
16
17 from typing import Callable, List, Optional, Union
18
19 from streamlit import config
20 from streamlit import net_util
21 from streamlit import type_util
22 from streamlit import url_util
23 from streamlit.ForwardMsgCache import populate_hash_if_needed
24
25 # Largest message that can be sent via the WebSocket connection.
26 # (Limit was picked arbitrarily)
27 # TODO: Break message in several chunks if too large.
28 MESSAGE_SIZE_LIMIT = 50 * 1e6 # 50MB
29
30
31 def is_cacheable_msg(msg):
32 """True if the given message qualifies for caching.
33
34 Parameters
35 ----------
36 msg : ForwardMsg
37
38 Returns
39 -------
40 bool
41 True if we should cache the message.
42
43 """
44 if msg.WhichOneof("type") in {"ref_hash", "initialize"}:
45 # Some message types never get cached
46 return False
47 return msg.ByteSize() >= config.get_option("global.minCachedMessageSize")
48
49
50 def serialize_forward_msg(msg):
51 """Serialize a ForwardMsg to send to a client.
52
53 If the message is too large, it will be converted to an exception message
54 instead.
55
56 Parameters
57 ----------
58 msg : ForwardMsg
59 The message to serialize
60
61 Returns
62 -------
63 str
64 The serialized byte string to send
65
66 """
67 populate_hash_if_needed(msg)
68 msg_str = msg.SerializeToString()
69
70 if len(msg_str) > MESSAGE_SIZE_LIMIT:
71 _convert_msg_to_exception_msg(msg, RuntimeError("Data too large"))
72 msg_str = msg.SerializeToString()
73
74 return msg_str
75
76
77 def _convert_msg_to_exception_msg(msg, e):
78 import streamlit.elements.exception_proto as exception_proto
79
80 delta_id = msg.metadata.delta_id
81 msg.Clear()
82 msg.metadata.delta_id = delta_id
83
84 exception_proto.marshall(msg.delta.new_element.exception, e)
85
86
87 def is_url_from_allowed_origins(url):
88 """Return True if URL is from allowed origins (for CORS purpose).
89
90 Allowed origins:
91 1. localhost
92 2. The internal and external IP addresses of the machine where this
93 function was called from.
94 3. The cloud storage domain configured in `s3.bucket`.
95
96 If `server.enableCORS` is False, this allows all origins.
97
98 Parameters
99 ----------
100 url : str
101 The URL to check
102
103 Returns
104 -------
105 bool
106 True if URL is accepted. False otherwise.
107
108 """
109 if not config.get_option("server.enableCORS"):
110 # Allow everything when CORS is disabled.
111 return True
112
113 hostname = url_util.get_hostname(url)
114
115 allowed_domains = [ # List[Union[str, Callable[[], Optional[str]]]]
116 # Check localhost first.
117 "localhost",
118 "0.0.0.0",
119 "127.0.0.1",
120 # Try to avoid making unecessary HTTP requests by checking if the user
121 # manually specified a server address.
122 _get_server_address_if_manually_set,
123 _get_s3_url_host_if_manually_set,
124 # Then try the options that depend on HTTP requests or opening sockets.
125 net_util.get_internal_ip,
126 net_util.get_external_ip,
127 lambda: config.get_option("s3.bucket"),
128 ]
129
130 for allowed_domain in allowed_domains:
131 if callable(allowed_domain):
132 allowed_domain = allowed_domain()
133
134 if allowed_domain is None:
135 continue
136
137 if hostname == allowed_domain:
138 return True
139
140 return False
141
142
143 def _get_server_address_if_manually_set() -> Optional[str]:
144 if config.is_manually_set("browser.serverAddress"):
145 return url_util.get_hostname(config.get_option("browser.serverAddress"))
146 return None
147
148
149 def _get_s3_url_host_if_manually_set() -> Optional[str]:
150 if config.is_manually_set("s3.url"):
151 return url_util.get_hostname(config.get_option("s3.url"))
152 return None
153
154
155 def make_url_path_regex(*path, **kwargs):
156 """Get a regex of the form ^/foo/bar/baz/?$ for a path (foo, bar, baz)."""
157 path = [x.strip("/") for x in path if x] # Filter out falsy components.
158 path_format = r"^/%s/?$" if kwargs.get("trailing_slash", True) else r"^/%s$"
159 return path_format % "/".join(path)
160
[end of lib/streamlit/server/server_util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/streamlit/server/server_util.py b/lib/streamlit/server/server_util.py
--- a/lib/streamlit/server/server_util.py
+++ b/lib/streamlit/server/server_util.py
@@ -68,22 +68,19 @@
msg_str = msg.SerializeToString()
if len(msg_str) > MESSAGE_SIZE_LIMIT:
- _convert_msg_to_exception_msg(msg, RuntimeError("Data too large"))
+ import streamlit.elements.exception_proto as exception_proto
+
+ error = RuntimeError(
+ f"Data of size {len(msg_str)/1e6:.1f}MB exceeds write limit of {MESSAGE_SIZE_LIMIT/1e6}MB"
+ )
+ # Overwrite the offending ForwardMsg.delta with an error to display.
+ # This assumes that the size limit wasn't exceeded due to metadata.
+ exception_proto.marshall(msg.delta.new_element.exception, error)
msg_str = msg.SerializeToString()
return msg_str
-def _convert_msg_to_exception_msg(msg, e):
- import streamlit.elements.exception_proto as exception_proto
-
- delta_id = msg.metadata.delta_id
- msg.Clear()
- msg.metadata.delta_id = delta_id
-
- exception_proto.marshall(msg.delta.new_element.exception, e)
-
-
def is_url_from_allowed_origins(url):
"""Return True if URL is from allowed origins (for CORS purpose).
| {"golden_diff": "diff --git a/lib/streamlit/server/server_util.py b/lib/streamlit/server/server_util.py\n--- a/lib/streamlit/server/server_util.py\n+++ b/lib/streamlit/server/server_util.py\n@@ -68,22 +68,19 @@\n msg_str = msg.SerializeToString()\n \n if len(msg_str) > MESSAGE_SIZE_LIMIT:\n- _convert_msg_to_exception_msg(msg, RuntimeError(\"Data too large\"))\n+ import streamlit.elements.exception_proto as exception_proto\n+\n+ error = RuntimeError(\n+ f\"Data of size {len(msg_str)/1e6:.1f}MB exceeds write limit of {MESSAGE_SIZE_LIMIT/1e6}MB\"\n+ )\n+ # Overwrite the offending ForwardMsg.delta with an error to display.\n+ # This assumes that the size limit wasn't exceeded due to metadata.\n+ exception_proto.marshall(msg.delta.new_element.exception, error)\n msg_str = msg.SerializeToString()\n \n return msg_str\n \n \n-def _convert_msg_to_exception_msg(msg, e):\n- import streamlit.elements.exception_proto as exception_proto\n-\n- delta_id = msg.metadata.delta_id\n- msg.Clear()\n- msg.metadata.delta_id = delta_id\n-\n- exception_proto.marshall(msg.delta.new_element.exception, e)\n-\n-\n def is_url_from_allowed_origins(url):\n \"\"\"Return True if URL is from allowed origins (for CORS purpose).\n", "issue": "st.write throws value is null when string is too big\n# Summary\r\ncalling `st.write` with a string that is too big will result on an error message on the front end with message \"value is null\"\r\n\r\n<img width=\"551\" alt=\"Screen Shot 2019-10-16 at 3 57 57 PM\" src=\"https://user-images.githubusercontent.com/934511/66950384-a17d0e00-f02e-11e9-87bb-1df7158794b5.png\">\r\n\r\n# Steps to reproduce\r\n1. create a script with a variable that holds a string that's more than weights more than 50mb\r\n2. streamlit run yourscript.py\r\n3. message will appear on the front end\r\n\r\n\r\n## Expected behavior:\r\nThe error message should be clearer.\r\n\r\n## Actual behavior:\r\nError message does not point to the string length constrain of `st.write`.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Server related utility functions\"\"\"\n\nfrom typing import Callable, List, Optional, Union\n\nfrom streamlit import config\nfrom streamlit import net_util\nfrom streamlit import type_util\nfrom streamlit import url_util\nfrom streamlit.ForwardMsgCache import populate_hash_if_needed\n\n# Largest message that can be sent via the WebSocket connection.\n# (Limit was picked arbitrarily)\n# TODO: Break message in several chunks if too large.\nMESSAGE_SIZE_LIMIT = 50 * 1e6 # 50MB\n\n\ndef is_cacheable_msg(msg):\n \"\"\"True if the given message qualifies for caching.\n\n Parameters\n ----------\n msg : ForwardMsg\n\n Returns\n -------\n bool\n True if we should cache the message.\n\n \"\"\"\n if msg.WhichOneof(\"type\") in {\"ref_hash\", \"initialize\"}:\n # Some message types never get cached\n return False\n return msg.ByteSize() >= config.get_option(\"global.minCachedMessageSize\")\n\n\ndef 
serialize_forward_msg(msg):\n \"\"\"Serialize a ForwardMsg to send to a client.\n\n If the message is too large, it will be converted to an exception message\n instead.\n\n Parameters\n ----------\n msg : ForwardMsg\n The message to serialize\n\n Returns\n -------\n str\n The serialized byte string to send\n\n \"\"\"\n populate_hash_if_needed(msg)\n msg_str = msg.SerializeToString()\n\n if len(msg_str) > MESSAGE_SIZE_LIMIT:\n _convert_msg_to_exception_msg(msg, RuntimeError(\"Data too large\"))\n msg_str = msg.SerializeToString()\n\n return msg_str\n\n\ndef _convert_msg_to_exception_msg(msg, e):\n import streamlit.elements.exception_proto as exception_proto\n\n delta_id = msg.metadata.delta_id\n msg.Clear()\n msg.metadata.delta_id = delta_id\n\n exception_proto.marshall(msg.delta.new_element.exception, e)\n\n\ndef is_url_from_allowed_origins(url):\n \"\"\"Return True if URL is from allowed origins (for CORS purpose).\n\n Allowed origins:\n 1. localhost\n 2. The internal and external IP addresses of the machine where this\n function was called from.\n 3. The cloud storage domain configured in `s3.bucket`.\n\n If `server.enableCORS` is False, this allows all origins.\n\n Parameters\n ----------\n url : str\n The URL to check\n\n Returns\n -------\n bool\n True if URL is accepted. False otherwise.\n\n \"\"\"\n if not config.get_option(\"server.enableCORS\"):\n # Allow everything when CORS is disabled.\n return True\n\n hostname = url_util.get_hostname(url)\n\n allowed_domains = [ # List[Union[str, Callable[[], Optional[str]]]]\n # Check localhost first.\n \"localhost\",\n \"0.0.0.0\",\n \"127.0.0.1\",\n # Try to avoid making unecessary HTTP requests by checking if the user\n # manually specified a server address.\n _get_server_address_if_manually_set,\n _get_s3_url_host_if_manually_set,\n # Then try the options that depend on HTTP requests or opening sockets.\n net_util.get_internal_ip,\n net_util.get_external_ip,\n lambda: config.get_option(\"s3.bucket\"),\n ]\n\n for allowed_domain in allowed_domains:\n if callable(allowed_domain):\n allowed_domain = allowed_domain()\n\n if allowed_domain is None:\n continue\n\n if hostname == allowed_domain:\n return True\n\n return False\n\n\ndef _get_server_address_if_manually_set() -> Optional[str]:\n if config.is_manually_set(\"browser.serverAddress\"):\n return url_util.get_hostname(config.get_option(\"browser.serverAddress\"))\n return None\n\n\ndef _get_s3_url_host_if_manually_set() -> Optional[str]:\n if config.is_manually_set(\"s3.url\"):\n return url_util.get_hostname(config.get_option(\"s3.url\"))\n return None\n\n\ndef make_url_path_regex(*path, **kwargs):\n \"\"\"Get a regex of the form ^/foo/bar/baz/?$ for a path (foo, bar, baz).\"\"\"\n path = [x.strip(\"/\") for x in path if x] # Filter out falsy components.\n path_format = r\"^/%s/?$\" if kwargs.get(\"trailing_slash\", True) else r\"^/%s$\"\n return path_format % \"/\".join(path)\n", "path": "lib/streamlit/server/server_util.py"}]} | 2,223 | 298 |
gh_patches_debug_12795 | rasdani/github-patches | git_diff | pypa__setuptools-2381 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pip install -e fails on version 50: command class <class 'setuptools.command.egg_info.egg_info'> must subclass Command
```
> pip install -e .
ERROR: Command errored out with exit status 1:
command: /Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/bin/python3.6 -c 'import sys, setuptools, tokenize; sys.argv[0] = '"'"'/Users/jaykarimi/Documents/vanir/setup.py'"'"'; __file__='"'"'/Users/jaykarimi/Documents/vanir/setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' egg_info
cwd: /Users/jaykarimi/Documents/vanir/
Complete output (19 lines):
/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/_distutils_hack/__init__.py:30: UserWarning: Setuptools is replacing distutils.
warnings.warn("Setuptools is replacing distutils.")
/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/dist.py:452: UserWarning: Normalizing 'v1.7.1' to '1.7.1'
warnings.warn(tmpl.format(**locals()))
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/Users/jaykarimi/Documents/vanir/setup.py", line 127, in <module>
setup(**_conf)
File "/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/__init__.py", line 153, in setup
return distutils.core.setup(**attrs)
File "/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/_distutils/core.py", line 134, in setup
ok = dist.parse_command_line()
File "/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/_distutils/dist.py", line 484, in parse_command_line
args = self._parse_command_opts(parser, args)
File "/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/dist.py", line 903, in _parse_command_opts
nargs = _Distribution._parse_command_opts(self, parser, args)
File "/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/_distutils/dist.py", line 548, in _parse_command_opts
"command class %s must subclass Command" % cmd_class)
distutils.errors.DistutilsClassError: command class <class 'setuptools.command.egg_info.egg_info'> must subclass Command
----------------------------------------
ERROR: Command errored out with exit status 1: python setup.py egg_info Check the logs for full command output.
```
</issue>
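One reading of the traceback: the `spec_for_pip` hook in `_distutils_hack` clears the local distutils whenever pip shows up, even when pip is merely imported while a `setup.py` is already executing, which leaves previously created command classes bound to a distutils that has since been swapped out. A minimal sketch, assuming stack inspection is an acceptable way to tell the two situations apart (the helper name is illustrative):

```python
import traceback

def pip_imported_during_build() -> bool:
    # True when this import was triggered from inside a running setup.py,
    # in which case the local distutils replacement should be left alone.
    return any(
        frame.f_globals.get('__file__', '').endswith('setup.py')
        for frame, _lineno in traceback.walk_stack(None)
    )
```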
<code>
[start of _distutils_hack/__init__.py]
1 import sys
2 import os
3 import re
4 import importlib
5 import warnings
6
7
8 is_pypy = '__pypy__' in sys.builtin_module_names
9
10
11 def warn_distutils_present():
12 if 'distutils' not in sys.modules:
13 return
14 if is_pypy and sys.version_info < (3, 7):
15 # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
16 # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
17 return
18 warnings.warn(
19 "Distutils was imported before Setuptools, but importing Setuptools "
20 "also replaces the `distutils` module in `sys.modules`. This may lead "
21 "to undesirable behaviors or errors. To avoid these issues, avoid "
22 "using distutils directly, ensure that setuptools is installed in the "
23 "traditional way (e.g. not an editable install), and/or make sure "
24 "that setuptools is always imported before distutils.")
25
26
27 def clear_distutils():
28 if 'distutils' not in sys.modules:
29 return
30 warnings.warn("Setuptools is replacing distutils.")
31 mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
32 for name in mods:
33 del sys.modules[name]
34
35
36 def enabled():
37 """
38 Allow selection of distutils by environment variable.
39 """
40 which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
41 return which == 'local'
42
43
44 def ensure_local_distutils():
45 clear_distutils()
46 distutils = importlib.import_module('setuptools._distutils')
47 distutils.__name__ = 'distutils'
48 sys.modules['distutils'] = distutils
49
50 # sanity check that submodules load as expected
51 core = importlib.import_module('distutils.core')
52 assert '_distutils' in core.__file__, core.__file__
53
54
55 def do_override():
56 """
57 Ensure that the local copy of distutils is preferred over stdlib.
58
59 See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
60 for more motivation.
61 """
62 if enabled():
63 warn_distutils_present()
64 ensure_local_distutils()
65
66
67 class DistutilsMetaFinder:
68 def find_spec(self, fullname, path, target=None):
69 if path is not None:
70 return
71
72 method_name = 'spec_for_{fullname}'.format(**locals())
73 method = getattr(self, method_name, lambda: None)
74 return method()
75
76 def spec_for_distutils(self):
77 import importlib.abc
78 import importlib.util
79
80 class DistutilsLoader(importlib.abc.Loader):
81
82 def create_module(self, spec):
83 return importlib.import_module('setuptools._distutils')
84
85 def exec_module(self, module):
86 pass
87
88 return importlib.util.spec_from_loader('distutils', DistutilsLoader())
89
90 def spec_for_pip(self):
91 """
92 Ensure stdlib distutils when running under pip.
93 See pypa/pip#8761 for rationale.
94 """
95 clear_distutils()
96 self.spec_for_distutils = lambda: None
97
98
99 DISTUTILS_FINDER = DistutilsMetaFinder()
100
101
102 def add_shim():
103 sys.meta_path.insert(0, DISTUTILS_FINDER)
104
105
106 def remove_shim():
107 try:
108 sys.meta_path.remove(DISTUTILS_FINDER)
109 except ValueError:
110 pass
111
[end of _distutils_hack/__init__.py]
</code>
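For background on the mechanism in the file above: a finder placed at the front of `sys.meta_path` gets first say on every import, which is how the shim can serve its own module under the stdlib's name. A stripped-down illustration using only the standard library; the module names `mylib_alias` and `json` are arbitrary stand-ins, not anything from the project.

```python
import importlib
import importlib.abc
import importlib.util
import sys

class AliasLoader(importlib.abc.Loader):
    def __init__(self, real_name):
        self.real_name = real_name

    def create_module(self, spec):
        # Hand back an already-imported module instead of building a new one.
        return importlib.import_module(self.real_name)

    def exec_module(self, module):
        pass

class AliasFinder(importlib.abc.MetaPathFinder):
    def find_spec(self, fullname, path, target=None):
        if fullname == 'mylib_alias' and path is None:
            return importlib.util.spec_from_loader(fullname, AliasLoader('json'))
        return None

sys.meta_path.insert(0, AliasFinder())

import mylib_alias              # actually resolves to the stdlib json module
print(mylib_alias.dumps({'ok': True}))
```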
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py
--- a/_distutils_hack/__init__.py
+++ b/_distutils_hack/__init__.py
@@ -92,9 +92,22 @@
Ensure stdlib distutils when running under pip.
See pypa/pip#8761 for rationale.
"""
+ if self.pip_imported_during_build():
+ return
clear_distutils()
self.spec_for_distutils = lambda: None
+ @staticmethod
+ def pip_imported_during_build():
+ """
+ Detect if pip is being imported in a build script. Ref #2355.
+ """
+ import traceback
+ return any(
+ frame.f_globals['__file__'].endswith('setup.py')
+ for frame, line in traceback.walk_stack(None)
+ )
+
DISTUTILS_FINDER = DistutilsMetaFinder()
| {"golden_diff": "diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py\n--- a/_distutils_hack/__init__.py\n+++ b/_distutils_hack/__init__.py\n@@ -92,9 +92,22 @@\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n+ if self.pip_imported_during_build():\n+ return\n clear_distutils()\n self.spec_for_distutils = lambda: None\n \n+ @staticmethod\n+ def pip_imported_during_build():\n+ \"\"\"\n+ Detect if pip is being imported in a build script. Ref #2355.\n+ \"\"\"\n+ import traceback\n+ return any(\n+ frame.f_globals['__file__'].endswith('setup.py')\n+ for frame, line in traceback.walk_stack(None)\n+ )\n+\n \n DISTUTILS_FINDER = DistutilsMetaFinder()\n", "issue": "pip install -e fails on version 50: command class <class 'setuptools.command.egg_info.egg_info'> must subclass Command\n```\r\n> pip install -e .\r\n ERROR: Command errored out with exit status 1:\r\n command: /Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/bin/python3.6 -c 'import sys, setuptools, tokenize; sys.argv[0] = '\"'\"'/Users/jaykarimi/Documents/vanir/setup.py'\"'\"'; __file__='\"'\"'/Users/jaykarimi/Documents/vanir/setup.py'\"'\"';f=getattr(tokenize, '\"'\"'open'\"'\"', open)(__file__);code=f.read().replace('\"'\"'\\r\\n'\"'\"', '\"'\"'\\n'\"'\"');f.close();exec(compile(code, __file__, '\"'\"'exec'\"'\"'))' egg_info\r\n cwd: /Users/jaykarimi/Documents/vanir/\r\n Complete output (19 lines):\r\n /Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/_distutils_hack/__init__.py:30: UserWarning: Setuptools is replacing distutils.\r\n warnings.warn(\"Setuptools is replacing distutils.\")\r\n /Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/dist.py:452: UserWarning: Normalizing 'v1.7.1' to '1.7.1'\r\n warnings.warn(tmpl.format(**locals()))\r\n Traceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/Users/jaykarimi/Documents/vanir/setup.py\", line 127, in <module>\r\n setup(**_conf)\r\n File \"/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/__init__.py\", line 153, in setup\r\n return distutils.core.setup(**attrs)\r\n File \"/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/_distutils/core.py\", line 134, in setup\r\n ok = dist.parse_command_line()\r\n File \"/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/_distutils/dist.py\", line 484, in parse_command_line\r\n args = self._parse_command_opts(parser, args)\r\n File \"/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/dist.py\", line 903, in _parse_command_opts\r\n nargs = _Distribution._parse_command_opts(self, parser, args)\r\n File \"/Users/jaykarimi/.pyenv/versions/3.6.5/envs/ci-debug/lib/python3.6/site-packages/setuptools/_distutils/dist.py\", line 548, in _parse_command_opts\r\n \"command class %s must subclass Command\" % cmd_class)\r\n distutils.errors.DistutilsClassError: command class <class 'setuptools.command.egg_info.egg_info'> must subclass Command\r\n ----------------------------------------\r\nERROR: Command errored out with exit status 1: python setup.py egg_info Check the logs for full command output.\r\n```\n", "before_files": [{"content": "import sys\nimport os\nimport re\nimport importlib\nimport warnings\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\ndef warn_distutils_present():\n if 'distutils' not in 
sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. not an editable install), and/or make sure \"\n \"that setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n clear_distutils()\n distutils = importlib.import_module('setuptools._distutils')\n distutils.__name__ = 'distutils'\n sys.modules['distutils'] = distutils\n\n # sanity check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None:\n return\n\n method_name = 'spec_for_{fullname}'.format(**locals())\n method = getattr(self, method_name, lambda: None)\n return method()\n\n def spec_for_distutils(self):\n import importlib.abc\n import importlib.util\n\n class DistutilsLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n return importlib.import_module('setuptools._distutils')\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader('distutils', DistutilsLoader())\n\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n clear_distutils()\n self.spec_for_distutils = lambda: None\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef add_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}]} | 2,333 | 215 |
gh_patches_debug_16921 | rasdani/github-patches | git_diff | tinygrad__tinygrad-667 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Assertion error running deep_deterministic_policy_gradient.py example
- Python 3.9.2
- osx M1
`python3 deep_deterministic_policy_gradient.py`
```
cuda backend not available No module named 'pycuda'
gpu backend not available No module named 'pyopencl'
metal backend not available No module named 'Metal'
Traceback (most recent call last):
File "tinygrad/examples/deep_deterministic_policy_gradient.py", line 229, in <module>
agent = DeepDeterministicPolicyGradient(env)
File "tinygrad/examples/deep_deterministic_policy_gradient.py", line 166, in __init__
self.update_network_parameters(tau=1.0)
File "tinygrad/examples/deep_deterministic_policy_gradient.py", line 176, in update_network_parameters
target_param.assign(param * tau + target_param * (1.0 - tau))
File "tinygrad/tinygrad/tensor.py", line 83, in assign
assert not x.requires_grad # self requires_grad is okay?
AssertionError
```
</issue>
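The assertion fires because the blended tensor built from `param * tau + target_param * (1.0 - tau)` still tracks gradients, and `Tensor.assign` refuses values with `requires_grad` set. A minimal sketch of the soft update with gradient tracking stripped off first; treating `.detach()` as the remedy is an assumption, not a verified fix for this version of tinygrad.

```python
def soft_update(params, target_params, tau: float) -> None:
    for param, target_param in zip(params, target_params):
        # Detach both operands so the blended result does not require grad,
        # which satisfies the assertion inside Tensor.assign.
        blended = param.detach() * tau + target_param.detach() * (1.0 - tau)
        target_param.assign(blended)
```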
<code>
[start of examples/deep_deterministic_policy_gradient.py]
1 from typing import Optional, Tuple
2 from numpy.typing import NDArray
3
4 from tinygrad.tensor import Tensor
5 from tinygrad.nn import optim
6 from tinygrad.helpers import getenv
7
8 import numpy as np
9 import gym
10
11
12 DEVICE = "GPU" if getenv("GPU") else "CPU"
13
14
15 class Actor:
16 def __init__(self, num_actions: int, num_states: int, hidden_size: Tuple[int, int] = (400, 300)):
17 self.l1 = Tensor.glorot_uniform(num_states, hidden_size[0])
18 self.l2 = Tensor.glorot_uniform(hidden_size[0], hidden_size[1])
19 self.mu = Tensor.glorot_uniform(hidden_size[1], num_actions)
20
21 def forward(self, state: Tensor, upper_bound: float) -> Tensor:
22 out = state.dot(self.l1).relu()
23 out = out.dot(self.l2).relu()
24 out = out.dot(self.mu).tanh()
25 output = out * upper_bound
26
27 return output
28
29
30 class Critic:
31 def __init__(self, num_inputs: int, hidden_size: Tuple[int, int] = (400, 300)):
32 self.l1 = Tensor.glorot_uniform(num_inputs, hidden_size[0])
33 self.l2 = Tensor.glorot_uniform(hidden_size[0], hidden_size[1])
34 self.q = Tensor.glorot_uniform(hidden_size[1], 1)
35
36 def forward(self, state: Tensor, action: Tensor) -> Tensor:
37 inputs = state.cat(action, dim=1)
38 out = inputs.dot(self.l1).relu()
39 out = out.dot(self.l2).relu()
40 q = out.dot(self.q)
41
42 return q
43
44
45 class Buffer:
46 def __init__(self, num_actions: int, num_states: int, buffer_capacity: int = 100000, batch_size: int = 64):
47 self.buffer_capacity = buffer_capacity
48 self.batch_size = batch_size
49
50 self.buffer_counter = 0
51
52 self.state_buffer = np.zeros((self.buffer_capacity, num_states))
53 self.action_buffer = np.zeros((self.buffer_capacity, num_actions))
54 self.reward_buffer = np.zeros((self.buffer_capacity, 1))
55 self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))
56 self.done_buffer = np.zeros((self.buffer_capacity, 1))
57
58 def record(
59 self, observations: Tuple[Tensor, NDArray, float, NDArray, bool]
60 ) -> None:
61 index = self.buffer_counter % self.buffer_capacity
62
63 self.state_buffer[index] = observations[0].detach().numpy()
64 self.action_buffer[index] = observations[1]
65 self.reward_buffer[index] = observations[2]
66 self.next_state_buffer[index] = observations[3]
67 self.done_buffer[index] = observations[4]
68
69 self.buffer_counter += 1
70
71 def sample(self) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
72 record_range = min(self.buffer_counter, self.buffer_capacity)
73 batch_indices = np.random.choice(record_range, self.batch_size)
74
75 state_batch = Tensor(self.state_buffer[batch_indices], device=DEVICE, requires_grad=False)
76 action_batch = Tensor(self.action_buffer[batch_indices], device=DEVICE, requires_grad=False)
77 reward_batch = Tensor(self.reward_buffer[batch_indices], device=DEVICE, requires_grad=False)
78 next_state_batch = Tensor(self.next_state_buffer[batch_indices], device=DEVICE, requires_grad=False)
79 done_batch = Tensor(self.done_buffer[batch_indices], device=DEVICE, requires_grad=False)
80
81 return state_batch, action_batch, reward_batch, next_state_batch, done_batch
82
83
84 class GaussianActionNoise:
85 def __init__(self, mean: NDArray, std_deviation: NDArray):
86 self.mean = mean
87 self.std_dev = std_deviation
88
89 def __call__(self) -> Tensor:
90 return Tensor(
91 np.random.default_rng()
92 .normal(self.mean, self.std_dev, size=self.mean.shape)
93 .astype(np.float32),
94 device=DEVICE,
95 requires_grad=False,
96 )
97
98
99 class DeepDeterministicPolicyGradient:
100 """Deep Deterministic Policy Gradient (DDPG).
101
102 https://arxiv.org/pdf/1509.02971.pdf
103
104 Args:
105 env: The environment to learn from.
106 lr_actor: The learning rate of the actor.
107 lr_critic: The learning rate of the critic.
108 gamma: The discount factor.
109 buffer_capacity: The size of the replay buffer.
110 tau: The soft update coefficient.
111 hidden_size: The number of neurons in the hidden layers of the actor and critic networks.
112 batch_size: The minibatch size for each gradient update.
113 noise_stddev: The standard deviation of the exploration noise.
114
115 Note:
116 In contrast to the original paper, actions are already included in the first layer
117 of the Critic and we use a Gaussian distribution instead of an Ornstein Uhlenbeck
118 process for exploration noise.
119
120 """
121
122 def __init__(
123 self,
124 env: gym.Env,
125 lr_actor: float = 0.001,
126 lr_critic: float = 0.002,
127 gamma: float = 0.99,
128 buffer_capacity: int = 100000,
129 tau: float = 0.005,
130 hidden_size: Tuple[int, int] = (400, 300),
131 batch_size: int = 64,
132 noise_stddev: float = 0.1,
133 ):
134 self.num_states = env.observation_space.shape[0]
135 self.num_actions = env.action_space.shape[0]
136 self.max_action = env.action_space.high.item()
137 self.min_action = env.action_space.low.item()
138 self.gamma = gamma
139 self.tau = tau
140 self.memory = Buffer(
141 self.num_actions, self.num_states, buffer_capacity, batch_size
142 )
143 self.batch_size = batch_size
144
145 self.noise = GaussianActionNoise(
146 mean=np.zeros(self.num_actions),
147 std_deviation=noise_stddev * np.ones(self.num_actions),
148 )
149
150 self.actor = Actor(self.num_actions, self.num_states, hidden_size)
151 self.critic = Critic(self.num_actions + self.num_states, hidden_size)
152 self.target_actor = Actor(self.num_actions, self.num_states, hidden_size)
153 self.target_critic = Critic(self.num_actions + self.num_states, hidden_size)
154
155 actor_params = optim.get_parameters(self.actor)
156 critic_params = optim.get_parameters(self.critic)
157 target_actor_params = optim.get_parameters(self.target_actor)
158 target_critic_params = optim.get_parameters(self.target_critic)
159
160 if DEVICE == "GPU":
161 [x.gpu_() for x in actor_params + critic_params + target_actor_params + target_critic_params]
162
163 self.actor_optimizer = optim.Adam(actor_params, lr_actor)
164 self.critic_optimizer = optim.Adam(critic_params, lr_critic)
165
166 self.update_network_parameters(tau=1.0)
167
168 def update_network_parameters(self, tau: Optional[float] = None) -> None:
169 """Updates the parameters of the target networks via 'soft updates'."""
170 if tau is None:
171 tau = self.tau
172
173 for param, target_param in zip(
174 optim.get_parameters(self.actor), optim.get_parameters(self.target_actor)
175 ):
176 target_param.assign(param * tau + target_param * (1.0 - tau))
177
178 for param, target_param in zip(
179 optim.get_parameters(self.critic), optim.get_parameters(self.target_critic)
180 ):
181 target_param.assign(param * tau + target_param * (1.0 - tau))
182
183 def choose_action(self, state: Tensor, evaluate: bool = False) -> NDArray:
184 mu = self.actor.forward(state, self.max_action)
185
186 if not evaluate:
187 mu = mu.add(self.noise())
188
189 mu = mu.clip(self.min_action, self.max_action)
190
191 return mu.detach().numpy()
192
193 def learn(self) -> None:
194 """Performs a learning step by sampling from replay buffer and updating networks."""
195 if self.memory.buffer_counter < self.batch_size:
196 return
197
198 (
199 state_batch,
200 action_batch,
201 reward_batch,
202 next_state_batch,
203 done_batch,
204 ) = self.memory.sample()
205
206 target_actions = self.target_actor.forward(next_state_batch, self.max_action)
207 y = reward_batch + self.gamma * self.target_critic.forward(
208 next_state_batch, target_actions.detach()
209 ) * (Tensor.ones(*done_batch.shape, device=DEVICE, requires_grad=False) - done_batch)
210
211 self.critic_optimizer.zero_grad()
212 critic_value = self.critic.forward(state_batch, action_batch)
213 critic_loss = y.detach().sub(critic_value).pow(2).mean()
214 critic_loss.backward()
215 self.critic_optimizer.step()
216
217 self.actor_optimizer.zero_grad()
218 actions = self.actor.forward(state_batch, self.max_action)
219 critic_value = self.critic.forward(state_batch, actions)
220 actor_loss = -critic_value.mean()
221 actor_loss.backward()
222 self.actor_optimizer.step()
223
224 self.update_network_parameters()
225
226
227 if __name__ == "__main__":
228 env = gym.make("Pendulum-v1")
229 agent = DeepDeterministicPolicyGradient(env)
230 num_episodes = 150
231
232 for episode in range(1, num_episodes+1):
233 cumulative_reward = 0.0
234 prev_state, info = env.reset() # for older gym versions only state is returned, so remove info
235 done = False
236
237 while not done:
238 prev_state = Tensor(prev_state, device=DEVICE, requires_grad=False)
239 action = agent.choose_action(prev_state)
240
241 state, reward, done, _, info = env.step(action) # for older gym versions there is only one bool, so remove _
242
243 cumulative_reward += reward
244
245 agent.memory.record((prev_state, action, reward, state, done))
246 agent.learn()
247
248 if done:
249 break
250
251 prev_state = state
252
253 print(
254 f"Episode {episode}/{num_episodes} - cumulative reward: {cumulative_reward}"
255 )
256
[end of examples/deep_deterministic_policy_gradient.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/deep_deterministic_policy_gradient.py b/examples/deep_deterministic_policy_gradient.py
--- a/examples/deep_deterministic_policy_gradient.py
+++ b/examples/deep_deterministic_policy_gradient.py
@@ -173,12 +173,12 @@
for param, target_param in zip(
optim.get_parameters(self.actor), optim.get_parameters(self.target_actor)
):
- target_param.assign(param * tau + target_param * (1.0 - tau))
+ target_param.assign(param.detach() * tau + target_param * (1.0 - tau))
for param, target_param in zip(
optim.get_parameters(self.critic), optim.get_parameters(self.target_critic)
):
- target_param.assign(param * tau + target_param * (1.0 - tau))
+ target_param.assign(param.detach() * tau + target_param * (1.0 - tau))
def choose_action(self, state: Tensor, evaluate: bool = False) -> NDArray:
mu = self.actor.forward(state, self.max_action)
| {"golden_diff": "diff --git a/examples/deep_deterministic_policy_gradient.py b/examples/deep_deterministic_policy_gradient.py\n--- a/examples/deep_deterministic_policy_gradient.py\n+++ b/examples/deep_deterministic_policy_gradient.py\n@@ -173,12 +173,12 @@\n for param, target_param in zip(\n optim.get_parameters(self.actor), optim.get_parameters(self.target_actor)\n ):\n- target_param.assign(param * tau + target_param * (1.0 - tau))\n+ target_param.assign(param.detach() * tau + target_param * (1.0 - tau))\n \n for param, target_param in zip(\n optim.get_parameters(self.critic), optim.get_parameters(self.target_critic)\n ):\n- target_param.assign(param * tau + target_param * (1.0 - tau))\n+ target_param.assign(param.detach() * tau + target_param * (1.0 - tau))\n \n def choose_action(self, state: Tensor, evaluate: bool = False) -> NDArray:\n mu = self.actor.forward(state, self.max_action)\n", "issue": "Assertion error running deep_deterministic_policy_gradient.py example\n- Python 3.9.2\r\n- osx M1\r\n\r\n` python3 deep_deterministic_policy_gradient.py`\r\n\r\n```\r\ncuda backend not available No module named 'pycuda'\r\ngpu backend not available No module named 'pyopencl'\r\nmetal backend not available No module named 'Metal'\r\nTraceback (most recent call last):\r\n File \"tinygrad/examples/deep_deterministic_policy_gradient.py\", line 229, in <module>\r\n agent = DeepDeterministicPolicyGradient(env)\r\n File \"tinygrad/examples/deep_deterministic_policy_gradient.py\", line 166, in __init__\r\n self.update_network_parameters(tau=1.0)\r\n File \"tinygrad/examples/deep_deterministic_policy_gradient.py\", line 176, in update_network_parameters\r\n target_param.assign(param * tau + target_param * (1.0 - tau))\r\n File \"tinygrad/tinygrad/tensor.py\", line 83, in assign\r\n assert not x.requires_grad # self requires_grad is okay?\r\nAssertionError\r\n```\n", "before_files": [{"content": "from typing import Optional, Tuple\nfrom numpy.typing import NDArray\n\nfrom tinygrad.tensor import Tensor\nfrom tinygrad.nn import optim\nfrom tinygrad.helpers import getenv\n\nimport numpy as np\nimport gym\n\n\nDEVICE = \"GPU\" if getenv(\"GPU\") else \"CPU\"\n\n\nclass Actor:\n def __init__(self, num_actions: int, num_states: int, hidden_size: Tuple[int, int] = (400, 300)):\n self.l1 = Tensor.glorot_uniform(num_states, hidden_size[0])\n self.l2 = Tensor.glorot_uniform(hidden_size[0], hidden_size[1])\n self.mu = Tensor.glorot_uniform(hidden_size[1], num_actions)\n\n def forward(self, state: Tensor, upper_bound: float) -> Tensor:\n out = state.dot(self.l1).relu()\n out = out.dot(self.l2).relu()\n out = out.dot(self.mu).tanh()\n output = out * upper_bound\n\n return output\n\n\nclass Critic:\n def __init__(self, num_inputs: int, hidden_size: Tuple[int, int] = (400, 300)):\n self.l1 = Tensor.glorot_uniform(num_inputs, hidden_size[0])\n self.l2 = Tensor.glorot_uniform(hidden_size[0], hidden_size[1])\n self.q = Tensor.glorot_uniform(hidden_size[1], 1)\n\n def forward(self, state: Tensor, action: Tensor) -> Tensor:\n inputs = state.cat(action, dim=1)\n out = inputs.dot(self.l1).relu()\n out = out.dot(self.l2).relu()\n q = out.dot(self.q)\n\n return q\n\n\nclass Buffer:\n def __init__(self, num_actions: int, num_states: int, buffer_capacity: int = 100000, batch_size: int = 64):\n self.buffer_capacity = buffer_capacity\n self.batch_size = batch_size\n\n self.buffer_counter = 0\n\n self.state_buffer = np.zeros((self.buffer_capacity, num_states))\n self.action_buffer = np.zeros((self.buffer_capacity, num_actions))\n 
self.reward_buffer = np.zeros((self.buffer_capacity, 1))\n self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))\n self.done_buffer = np.zeros((self.buffer_capacity, 1))\n\n def record(\n self, observations: Tuple[Tensor, NDArray, float, NDArray, bool]\n ) -> None:\n index = self.buffer_counter % self.buffer_capacity\n\n self.state_buffer[index] = observations[0].detach().numpy()\n self.action_buffer[index] = observations[1]\n self.reward_buffer[index] = observations[2]\n self.next_state_buffer[index] = observations[3]\n self.done_buffer[index] = observations[4]\n\n self.buffer_counter += 1\n\n def sample(self) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:\n record_range = min(self.buffer_counter, self.buffer_capacity)\n batch_indices = np.random.choice(record_range, self.batch_size)\n\n state_batch = Tensor(self.state_buffer[batch_indices], device=DEVICE, requires_grad=False)\n action_batch = Tensor(self.action_buffer[batch_indices], device=DEVICE, requires_grad=False)\n reward_batch = Tensor(self.reward_buffer[batch_indices], device=DEVICE, requires_grad=False)\n next_state_batch = Tensor(self.next_state_buffer[batch_indices], device=DEVICE, requires_grad=False)\n done_batch = Tensor(self.done_buffer[batch_indices], device=DEVICE, requires_grad=False)\n\n return state_batch, action_batch, reward_batch, next_state_batch, done_batch\n\n\nclass GaussianActionNoise:\n def __init__(self, mean: NDArray, std_deviation: NDArray):\n self.mean = mean\n self.std_dev = std_deviation\n\n def __call__(self) -> Tensor:\n return Tensor(\n np.random.default_rng()\n .normal(self.mean, self.std_dev, size=self.mean.shape)\n .astype(np.float32),\n device=DEVICE,\n requires_grad=False,\n )\n\n\nclass DeepDeterministicPolicyGradient:\n \"\"\"Deep Deterministic Policy Gradient (DDPG).\n\n https://arxiv.org/pdf/1509.02971.pdf\n\n Args:\n env: The environment to learn from.\n lr_actor: The learning rate of the actor.\n lr_critic: The learning rate of the critic.\n gamma: The discount factor.\n buffer_capacity: The size of the replay buffer.\n tau: The soft update coefficient.\n hidden_size: The number of neurons in the hidden layers of the actor and critic networks.\n batch_size: The minibatch size for each gradient update.\n noise_stddev: The standard deviation of the exploration noise.\n\n Note:\n In contrast to the original paper, actions are already included in the first layer \n of the Critic and we use a Gaussian distribution instead of an Ornstein Uhlenbeck \n process for exploration noise.\n\n \"\"\"\n\n def __init__(\n self,\n env: gym.Env,\n lr_actor: float = 0.001,\n lr_critic: float = 0.002,\n gamma: float = 0.99,\n buffer_capacity: int = 100000,\n tau: float = 0.005,\n hidden_size: Tuple[int, int] = (400, 300),\n batch_size: int = 64,\n noise_stddev: float = 0.1,\n ):\n self.num_states = env.observation_space.shape[0]\n self.num_actions = env.action_space.shape[0]\n self.max_action = env.action_space.high.item()\n self.min_action = env.action_space.low.item()\n self.gamma = gamma\n self.tau = tau\n self.memory = Buffer(\n self.num_actions, self.num_states, buffer_capacity, batch_size\n )\n self.batch_size = batch_size\n\n self.noise = GaussianActionNoise(\n mean=np.zeros(self.num_actions),\n std_deviation=noise_stddev * np.ones(self.num_actions),\n )\n\n self.actor = Actor(self.num_actions, self.num_states, hidden_size)\n self.critic = Critic(self.num_actions + self.num_states, hidden_size)\n self.target_actor = Actor(self.num_actions, self.num_states, hidden_size)\n 
self.target_critic = Critic(self.num_actions + self.num_states, hidden_size)\n\n actor_params = optim.get_parameters(self.actor)\n critic_params = optim.get_parameters(self.critic)\n target_actor_params = optim.get_parameters(self.target_actor)\n target_critic_params = optim.get_parameters(self.target_critic)\n\n if DEVICE == \"GPU\":\n [x.gpu_() for x in actor_params + critic_params + target_actor_params + target_critic_params]\n\n self.actor_optimizer = optim.Adam(actor_params, lr_actor)\n self.critic_optimizer = optim.Adam(critic_params, lr_critic)\n\n self.update_network_parameters(tau=1.0)\n\n def update_network_parameters(self, tau: Optional[float] = None) -> None:\n \"\"\"Updates the parameters of the target networks via 'soft updates'.\"\"\"\n if tau is None:\n tau = self.tau\n\n for param, target_param in zip(\n optim.get_parameters(self.actor), optim.get_parameters(self.target_actor)\n ):\n target_param.assign(param * tau + target_param * (1.0 - tau))\n\n for param, target_param in zip(\n optim.get_parameters(self.critic), optim.get_parameters(self.target_critic)\n ):\n target_param.assign(param * tau + target_param * (1.0 - tau))\n\n def choose_action(self, state: Tensor, evaluate: bool = False) -> NDArray:\n mu = self.actor.forward(state, self.max_action)\n\n if not evaluate:\n mu = mu.add(self.noise())\n\n mu = mu.clip(self.min_action, self.max_action)\n\n return mu.detach().numpy()\n\n def learn(self) -> None:\n \"\"\"Performs a learning step by sampling from replay buffer and updating networks.\"\"\"\n if self.memory.buffer_counter < self.batch_size:\n return\n\n (\n state_batch,\n action_batch,\n reward_batch,\n next_state_batch,\n done_batch,\n ) = self.memory.sample()\n \n target_actions = self.target_actor.forward(next_state_batch, self.max_action)\n y = reward_batch + self.gamma * self.target_critic.forward(\n next_state_batch, target_actions.detach()\n ) * (Tensor.ones(*done_batch.shape, device=DEVICE, requires_grad=False) - done_batch)\n\n self.critic_optimizer.zero_grad()\n critic_value = self.critic.forward(state_batch, action_batch)\n critic_loss = y.detach().sub(critic_value).pow(2).mean()\n critic_loss.backward()\n self.critic_optimizer.step()\n\n self.actor_optimizer.zero_grad()\n actions = self.actor.forward(state_batch, self.max_action)\n critic_value = self.critic.forward(state_batch, actions)\n actor_loss = -critic_value.mean()\n actor_loss.backward()\n self.actor_optimizer.step()\n\n self.update_network_parameters()\n\n\nif __name__ == \"__main__\":\n env = gym.make(\"Pendulum-v1\")\n agent = DeepDeterministicPolicyGradient(env)\n num_episodes = 150\n\n for episode in range(1, num_episodes+1):\n cumulative_reward = 0.0\n prev_state, info = env.reset() # for older gym versions only state is returned, so remove info\n done = False\n\n while not done:\n prev_state = Tensor(prev_state, device=DEVICE, requires_grad=False)\n action = agent.choose_action(prev_state)\n\n state, reward, done, _, info = env.step(action) # for older gym versions there is only one bool, so remove _\n\n cumulative_reward += reward\n\n agent.memory.record((prev_state, action, reward, state, done))\n agent.learn()\n\n if done:\n break\n\n prev_state = state\n\n print(\n f\"Episode {episode}/{num_episodes} - cumulative reward: {cumulative_reward}\"\n )\n", "path": "examples/deep_deterministic_policy_gradient.py"}]} | 3,660 | 230 |
gh_patches_debug_17339 | rasdani/github-patches | git_diff | Pycord-Development__pycord-1094 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Optional Modal InputText fields with existing values do not accept empty strings
### Summary
Submitted Empty InputText fields revert to stored value
### Reproduction Steps
1. Create a Modal with an `InputText` that is both `required=False` and has a `value`
2. Delete the text in the Modal and submit
3. Inspect the Modal's children in the callback.
### Minimal Reproducible Code
_No response_
### Expected Results
Expected: The incoming field (e.g. `self.children[0].value`) should be empty
### Actual Results
Actual: The field retains its previous/existing `value` instead of being overwritten by the empty string.
### Intents
members
### System Information
Python 3.9
Pycord 2.0.0b4
### Checklist
- [X] I have searched the open issues for duplicates.
- [X] I have shown the entire traceback, if possible.
- [X] I have removed my token from display, if visible.
### Additional Context
I did verify through logging that the Modal does indeed submit the empty value, but I was unable to find where in the code this was handled.
</issue>
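For illustration, a rough sketch of the reproduction steps above. The modal subclass and wiring are assumptions, not part of the original report, and constructor details may differ slightly between 2.0 betas:

```python
import discord

class NoteModal(discord.ui.Modal):
    def __init__(self):
        super().__init__(title="Note")
        # optional field that is pre-filled with a value
        self.add_item(discord.ui.InputText(label="Note", required=False, value="prefilled"))

    async def callback(self, interaction: discord.Interaction):
        # after clearing the field and submitting, this is expected to be "",
        # but it still reports "prefilled"
        await interaction.response.send_message(repr(self.children[0].value))
```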
<code>
[start of discord/ui/input_text.py]
1 from __future__ import annotations
2
3 import os
4 from typing import TYPE_CHECKING, Optional
5
6 from ..components import InputText as InputTextComponent
7 from ..enums import ComponentType, InputTextStyle
8 from ..utils import MISSING
9
10 __all__ = ("InputText",)
11
12 if TYPE_CHECKING:
13 from ..types.components import InputText as InputTextComponentPayload
14
15
16 class InputText:
17 """Represents a UI text input field.
18
19 Parameters
20 ----------
21 style: :class:`discord.InputTextStyle`
22 The style of the input text field.
23 custom_id: Optional[:class:`str`]
24 The ID of the input text field that gets received during an interaction.
25 label: :class:`str`
26 The label for the input text field.
27 Must be 45 characters or fewer.
28 placeholder: Optional[:class:`str`]
29 The placeholder text that is shown if nothing is selected, if any.
30 Must be 100 characters or fewer.
31 min_length: Optional[:class:`int`]
32 The minimum number of characters that must be entered.
33 Defaults to 0 and must be less than 4000.
34 max_length: Optional[:class:`int`]
35 The maximum number of characters that can be entered.
36 Must be between 1 and 4000.
37 required: Optional[:class:`bool`]
38 Whether the input text field is required or not. Defaults to `True`.
39 value: Optional[:class:`str`]
40 Pre-fills the input text field with this value.
41 Must be 4000 characters or fewer.
42 row: Optional[:class:`int`]
43 The relative row this input text field belongs to. A modal dialog can only have 5
44 rows. By default, items are arranged automatically into those 5 rows. If you'd
45 like to control the relative positioning of the row then passing an index is advised.
46 For example, row=1 will show up before row=2. Defaults to ``None``, which is automatic
47 ordering. The row number must be between 0 and 4 (i.e. zero indexed).
48 """
49
50 def __init__(
51 self,
52 *,
53 style: InputTextStyle = InputTextStyle.short,
54 custom_id: str = MISSING,
55 label: str,
56 placeholder: Optional[str] = None,
57 min_length: Optional[int] = None,
58 max_length: Optional[int] = None,
59 required: Optional[bool] = True,
60 value: Optional[str] = None,
61 row: Optional[int] = None,
62 ):
63 super().__init__()
64 custom_id = os.urandom(16).hex() if custom_id is MISSING else custom_id
65 if not (isinstance(custom_id, str) or custom_id is None):
66 raise TypeError(f"expected custom_id to be str, not {custom_id.__class__.__name__}")
67
68 self._underlying = InputTextComponent._raw_construct(
69 type=ComponentType.input_text,
70 style=style,
71 custom_id=custom_id,
72 label=label,
73 placeholder=placeholder,
74 min_length=min_length,
75 max_length=max_length,
76 required=required,
77 value=value,
78 )
79 self._input_value = None
80 self.row = row
81 self._rendered_row: Optional[int] = None
82
83 @property
84 def type(self) -> ComponentType:
85 return self._underlying.type
86
87 @property
88 def style(self) -> InputTextStyle:
89 """:class:`discord.InputTextStyle`: The style of the input text field."""
90 return self._underlying.style
91
92 @style.setter
93 def style(self, value: InputTextStyle):
94 if not isinstance(value, InputTextStyle):
95 raise TypeError(f"style must be of type InputTextStyle not {value.__class__}")
96 self._underlying.style = value
97
98 @property
99 def custom_id(self) -> str:
100 """:class:`str`: The ID of the input text field that gets received during an interaction."""
101 return self._underlying.custom_id
102
103 @custom_id.setter
104 def custom_id(self, value: str):
105 if not isinstance(value, str):
106 raise TypeError(f"custom_id must be None or str not {value.__class__}")
107 self._underlying.custom_id = value
108
109 @property
110 def label(self) -> str:
111 """:class:`str`: The label of the input text field."""
112 return self._underlying.label
113
114 @label.setter
115 def label(self, value: str):
116 if not isinstance(value, str):
117 raise TypeError(f"label should be str not {value.__class__}")
118 self._underlying.label = value
119
120 @property
121 def placeholder(self) -> Optional[str]:
122 """Optional[:class:`str`]: The placeholder text that is shown before anything is entered, if any."""
123 return self._underlying.placeholder
124
125 @placeholder.setter
126 def placeholder(self, value: Optional[str]):
127 if value and not isinstance(value, str):
128 raise TypeError(f"placeholder must be None or str not {value.__class__}") # type: ignore
129 self._underlying.placeholder = value
130
131 @property
132 def min_length(self) -> Optional[int]:
133 """Optional[:class:`int`]: The minimum number of characters that must be entered. Defaults to `0`."""
134 return self._underlying.min_length
135
136 @min_length.setter
137 def min_length(self, value: Optional[int]):
138 if value and not isinstance(value, int):
139 raise TypeError(f"min_length must be None or int not {value.__class__}") # type: ignore
140 self._underlying.min_length = value
141
142 @property
143 def max_length(self) -> Optional[int]:
144 """Optional[:class:`int`]: The maximum number of characters that can be entered."""
145 return self._underlying.max_length
146
147 @max_length.setter
148 def max_length(self, value: Optional[int]):
149 if value and not isinstance(value, int):
150 raise TypeError(f"min_length must be None or int not {value.__class__}") # type: ignore
151 self._underlying.max_length = value
152
153 @property
154 def required(self) -> Optional[bool]:
155 """Optional[:class:`bool`]: Whether the input text field is required or not. Defaults to `True`."""
156 return self._underlying.required
157
158 @required.setter
159 def required(self, value: Optional[bool]):
160 if not isinstance(value, bool):
161 raise TypeError(f"required must be bool not {value.__class__}") # type: ignore
162 self._underlying.required = bool(value)
163
164 @property
165 def value(self) -> Optional[str]:
166 """Optional[:class:`str`]: The value entered in the text field."""
167 return self._input_value or self._underlying.value
168
169 @value.setter
170 def value(self, value: Optional[str]):
171 if value and not isinstance(value, str):
172 raise TypeError(f"value must be None or str not {value.__class__}") # type: ignore
173 self._underlying.value = value
174
175 @property
176 def width(self) -> int:
177 return 5
178
179 def to_component_dict(self) -> InputTextComponentPayload:
180 return self._underlying.to_dict()
181
182 def refresh_state(self, data) -> None:
183 self._input_value = data["value"]
184
[end of discord/ui/input_text.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/discord/ui/input_text.py b/discord/ui/input_text.py
--- a/discord/ui/input_text.py
+++ b/discord/ui/input_text.py
@@ -76,7 +76,7 @@
required=required,
value=value,
)
- self._input_value = None
+ self._input_value = False
self.row = row
self._rendered_row: Optional[int] = None
@@ -164,7 +164,10 @@
@property
def value(self) -> Optional[str]:
"""Optional[:class:`str`]: The value entered in the text field."""
- return self._input_value or self._underlying.value
+ if self._input_value is not False:
+ # only False on init, otherwise the value was either set or cleared
+ return self._input_value # type: ignore
+ return self._underlying.value
@value.setter
def value(self, value: Optional[str]):
| {"golden_diff": "diff --git a/discord/ui/input_text.py b/discord/ui/input_text.py\n--- a/discord/ui/input_text.py\n+++ b/discord/ui/input_text.py\n@@ -76,7 +76,7 @@\n required=required,\n value=value,\n )\n- self._input_value = None\n+ self._input_value = False\n self.row = row\n self._rendered_row: Optional[int] = None\n \n@@ -164,7 +164,10 @@\n @property\n def value(self) -> Optional[str]:\n \"\"\"Optional[:class:`str`]: The value entered in the text field.\"\"\"\n- return self._input_value or self._underlying.value\n+ if self._input_value is not False:\n+ # only False on init, otherwise the value was either set or cleared\n+ return self._input_value # type: ignore\n+ return self._underlying.value\n \n @value.setter\n def value(self, value: Optional[str]):\n", "issue": "Optional Modal InputText fields with existing values do not accept empty strings\n### Summary\n\nSubmitted Empty InputText fields revert to stored value\n\n### Reproduction Steps\n\n1. Create a Modal with an `InputText` that is both `required=False` and has a `value`\r\n2. Delete the text in the Modal and submit\r\n3. Inspect the Modal's children in the callback.\n\n### Minimal Reproducible Code\n\n_No response_\n\n### Expected Results\n\nExpected: The incoming field (e.g. `self.children[0].value`) should be empty\n\n### Actual Results\n\nActual: The field retains its previous/existing `value` instead of being overwritten by the empty string.\n\n### Intents\n\nmembers\n\n### System Information\n\nPython 3.9\r\nPycord 2.0.0b4\n\n### Checklist\n\n- [X] I have searched the open issues for duplicates.\n- [X] I have shown the entire traceback, if possible.\n- [X] I have removed my token from display, if visible.\n\n### Additional Context\n\nI did verify through logging that the Modal does indeed submit the empty value, but I was unable to find where in the code this was handled.\nOptional Modal InputText fields with existing values do not accept empty strings\n### Summary\n\nSubmitted Empty InputText fields revert to stored value\n\n### Reproduction Steps\n\n1. Create a Modal with an `InputText` that is both `required=False` and has a `value`\r\n2. Delete the text in the Modal and submit\r\n3. Inspect the Modal's children in the callback.\n\n### Minimal Reproducible Code\n\n_No response_\n\n### Expected Results\n\nExpected: The incoming field (e.g. 
`self.children[0].value`) should be empty\n\n### Actual Results\n\nActual: The field retains its previous/existing `value` instead of being overwritten by the empty string.\n\n### Intents\n\nmembers\n\n### System Information\n\nPython 3.9\r\nPycord 2.0.0b4\n\n### Checklist\n\n- [X] I have searched the open issues for duplicates.\n- [X] I have shown the entire traceback, if possible.\n- [X] I have removed my token from display, if visible.\n\n### Additional Context\n\nI did verify through logging that the Modal does indeed submit the empty value, but I was unable to find where in the code this was handled.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport os\nfrom typing import TYPE_CHECKING, Optional\n\nfrom ..components import InputText as InputTextComponent\nfrom ..enums import ComponentType, InputTextStyle\nfrom ..utils import MISSING\n\n__all__ = (\"InputText\",)\n\nif TYPE_CHECKING:\n from ..types.components import InputText as InputTextComponentPayload\n\n\nclass InputText:\n \"\"\"Represents a UI text input field.\n\n Parameters\n ----------\n style: :class:`discord.InputTextStyle`\n The style of the input text field.\n custom_id: Optional[:class:`str`]\n The ID of the input text field that gets received during an interaction.\n label: :class:`str`\n The label for the input text field.\n Must be 45 characters or fewer.\n placeholder: Optional[:class:`str`]\n The placeholder text that is shown if nothing is selected, if any.\n Must be 100 characters or fewer.\n min_length: Optional[:class:`int`]\n The minimum number of characters that must be entered.\n Defaults to 0 and must be less than 4000.\n max_length: Optional[:class:`int`]\n The maximum number of characters that can be entered.\n Must be between 1 and 4000.\n required: Optional[:class:`bool`]\n Whether the input text field is required or not. Defaults to `True`.\n value: Optional[:class:`str`]\n Pre-fills the input text field with this value.\n Must be 4000 characters or fewer.\n row: Optional[:class:`int`]\n The relative row this input text field belongs to. A modal dialog can only have 5\n rows. By default, items are arranged automatically into those 5 rows. If you'd\n like to control the relative positioning of the row then passing an index is advised.\n For example, row=1 will show up before row=2. Defaults to ``None``, which is automatic\n ordering. The row number must be between 0 and 4 (i.e. 
zero indexed).\n \"\"\"\n\n def __init__(\n self,\n *,\n style: InputTextStyle = InputTextStyle.short,\n custom_id: str = MISSING,\n label: str,\n placeholder: Optional[str] = None,\n min_length: Optional[int] = None,\n max_length: Optional[int] = None,\n required: Optional[bool] = True,\n value: Optional[str] = None,\n row: Optional[int] = None,\n ):\n super().__init__()\n custom_id = os.urandom(16).hex() if custom_id is MISSING else custom_id\n if not (isinstance(custom_id, str) or custom_id is None):\n raise TypeError(f\"expected custom_id to be str, not {custom_id.__class__.__name__}\")\n\n self._underlying = InputTextComponent._raw_construct(\n type=ComponentType.input_text,\n style=style,\n custom_id=custom_id,\n label=label,\n placeholder=placeholder,\n min_length=min_length,\n max_length=max_length,\n required=required,\n value=value,\n )\n self._input_value = None\n self.row = row\n self._rendered_row: Optional[int] = None\n\n @property\n def type(self) -> ComponentType:\n return self._underlying.type\n\n @property\n def style(self) -> InputTextStyle:\n \"\"\":class:`discord.InputTextStyle`: The style of the input text field.\"\"\"\n return self._underlying.style\n\n @style.setter\n def style(self, value: InputTextStyle):\n if not isinstance(value, InputTextStyle):\n raise TypeError(f\"style must be of type InputTextStyle not {value.__class__}\")\n self._underlying.style = value\n\n @property\n def custom_id(self) -> str:\n \"\"\":class:`str`: The ID of the input text field that gets received during an interaction.\"\"\"\n return self._underlying.custom_id\n\n @custom_id.setter\n def custom_id(self, value: str):\n if not isinstance(value, str):\n raise TypeError(f\"custom_id must be None or str not {value.__class__}\")\n self._underlying.custom_id = value\n\n @property\n def label(self) -> str:\n \"\"\":class:`str`: The label of the input text field.\"\"\"\n return self._underlying.label\n\n @label.setter\n def label(self, value: str):\n if not isinstance(value, str):\n raise TypeError(f\"label should be str not {value.__class__}\")\n self._underlying.label = value\n\n @property\n def placeholder(self) -> Optional[str]:\n \"\"\"Optional[:class:`str`]: The placeholder text that is shown before anything is entered, if any.\"\"\"\n return self._underlying.placeholder\n\n @placeholder.setter\n def placeholder(self, value: Optional[str]):\n if value and not isinstance(value, str):\n raise TypeError(f\"placeholder must be None or str not {value.__class__}\") # type: ignore\n self._underlying.placeholder = value\n\n @property\n def min_length(self) -> Optional[int]:\n \"\"\"Optional[:class:`int`]: The minimum number of characters that must be entered. 
Defaults to `0`.\"\"\"\n return self._underlying.min_length\n\n @min_length.setter\n def min_length(self, value: Optional[int]):\n if value and not isinstance(value, int):\n raise TypeError(f\"min_length must be None or int not {value.__class__}\") # type: ignore\n self._underlying.min_length = value\n\n @property\n def max_length(self) -> Optional[int]:\n \"\"\"Optional[:class:`int`]: The maximum number of characters that can be entered.\"\"\"\n return self._underlying.max_length\n\n @max_length.setter\n def max_length(self, value: Optional[int]):\n if value and not isinstance(value, int):\n raise TypeError(f\"min_length must be None or int not {value.__class__}\") # type: ignore\n self._underlying.max_length = value\n\n @property\n def required(self) -> Optional[bool]:\n \"\"\"Optional[:class:`bool`]: Whether the input text field is required or not. Defaults to `True`.\"\"\"\n return self._underlying.required\n\n @required.setter\n def required(self, value: Optional[bool]):\n if not isinstance(value, bool):\n raise TypeError(f\"required must be bool not {value.__class__}\") # type: ignore\n self._underlying.required = bool(value)\n\n @property\n def value(self) -> Optional[str]:\n \"\"\"Optional[:class:`str`]: The value entered in the text field.\"\"\"\n return self._input_value or self._underlying.value\n\n @value.setter\n def value(self, value: Optional[str]):\n if value and not isinstance(value, str):\n raise TypeError(f\"value must be None or str not {value.__class__}\") # type: ignore\n self._underlying.value = value\n\n @property\n def width(self) -> int:\n return 5\n\n def to_component_dict(self) -> InputTextComponentPayload:\n return self._underlying.to_dict()\n\n def refresh_state(self, data) -> None:\n self._input_value = data[\"value\"]\n", "path": "discord/ui/input_text.py"}]} | 3,065 | 220 |
gh_patches_debug_15806 | rasdani/github-patches | git_diff | pwndbg__pwndbg-1409 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix cymbol command on Ubuntu 18.04
There's a cymbol test that fails on the Ubuntu 18.04 CI; this issue is here to track it.
</issue>
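Ubuntu 18.04 ships an older GDB (8.x), where `add-symbol-file` still expects an explicit address argument; the diff later in this record works around that by always passing a dummy address. A minimal sketch of the idea:

```python
# sketch: pass a dummy load address so old GDB versions (e.g. 8.2) accept the command;
# only type information is being loaded, so the address itself does not matter
gdb.execute(f"add-symbol-file {pwndbg_debug_symbols_output_file} 0", to_string=True)
```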
<code>
[start of pwndbg/commands/cymbol.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 Add, load, show, edit, or delete symbols for custom structures.
6
7 For the generation of the symbols g++/gcc is being used under the hood.
8
9 In case of remote debugging a binary which is not native to your architecture it
10 is advised to configure the 'gcc-compiler-path' config parameter to your own cross-platform
11 gnu gcc compiled toolchain for your target architecture.
12
13 You are advised to configure the 'cymbol-editor' config parameter to the path of your
14 favorite text editor. Otherwise cymbol expands $EDITOR and $VISUAL environment variables
15 to find the path to the default text editor.
16 """
17
18 import argparse
19 import functools
20 import os
21 import subprocess
22 import sys
23 import tempfile
24
25 import gdb
26
27 import pwndbg
28 import pwndbg.commands
29 import pwndbg.gdblib.arch
30 import pwndbg.lib.gcc
31 import pwndbg.lib.tempfile
32 from pwndbg.color import message
33
34 gcc_compiler_path = pwndbg.gdblib.config.add_param(
35 "gcc-compiler-path",
36 "",
37 "Path to the gcc/g++ toolchain for generating imported symbols",
38 )
39
40 cymbol_editor = pwndbg.gdblib.config.add_param(
41 "cymbol-editor", "", "Path to the editor for editing custom structures"
42 )
43
44 # Remember loaded symbols. This would be useful for 'remove-symbol-file'.
45 loaded_symbols = {}
46
47 # Where generated symbol source files are saved.
48 pwndbg_cachedir = pwndbg.lib.tempfile.cachedir("custom-symbols")
49
50
51 def unload_loaded_symbol(custom_structure_name):
52 custom_structure_symbols_file = loaded_symbols.get(custom_structure_name)
53 if custom_structure_symbols_file is not None:
54 gdb.execute(f"remove-symbol-file {custom_structure_symbols_file}")
55 loaded_symbols.pop(custom_structure_name)
56
57
58 def OnlyWhenStructFileExists(func):
59 @functools.wraps(func)
60 def wrapper(custom_structure_name):
61 pwndbg_custom_structure_path = os.path.join(pwndbg_cachedir, custom_structure_name) + ".c"
62 if not os.path.exists(pwndbg_custom_structure_path):
63 print(message.error("No custom structure was found with the given name!"))
64 return
65 return func(custom_structure_name, pwndbg_custom_structure_path)
66
67 return wrapper
68
69
70 def generate_debug_symbols(custom_structure_path, pwndbg_debug_symbols_output_file=None):
71 if not pwndbg_debug_symbols_output_file:
72 _, pwndbg_debug_symbols_output_file = tempfile.mkstemp(prefix="custom-", suffix=".dbg")
73
74 # -fno-eliminate-unused-debug-types is a handy gcc flag that lets us extract debug symbols from non-used defined structures.
75 gcc_extra_flags = [
76 custom_structure_path,
77 "-c",
78 "-g",
79 "-fno-eliminate-unused-debug-types",
80 "-o",
81 pwndbg_debug_symbols_output_file,
82 ]
83
84 # TODO: implement remote debugging support.
85 gcc_flags = pwndbg.lib.gcc.which(pwndbg.gdblib.arch)
86 if gcc_compiler_path != "":
87 gcc_flags[0] = gcc_compiler_path
88
89 gcc_cmd = gcc_flags + gcc_extra_flags
90
91 try:
92 subprocess.run(gcc_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
93 except subprocess.CalledProcessError as exception:
94 print(message.error(exception))
95 print(
96 message.error(
97 "Failed to compile the .c file with custom structures. Please fix any compilation errors there may be."
98 )
99 )
100 return None
101 except Exception as exception:
102 print(message.error(exception))
103         print(message.error("An error occurred while generating the debug symbols."))
104 return None
105
106 return pwndbg_debug_symbols_output_file
107
108
109 def add_custom_structure(custom_structure_name):
110 pwndbg_custom_structure_path = os.path.join(pwndbg_cachedir, custom_structure_name) + ".c"
111
112 if os.path.exists(pwndbg_custom_structure_path):
113 option = input(
114 message.notice(
115 "A custom structure was found with the given name, would you like to overwrite it? [y/n] "
116 )
117 )
118 if option != "y":
119 return
120
121 print(
122 message.notice("Enter your custom structure in a C header style, press Ctrl+D to save:\n")
123 )
124
125 custom_structures_source = sys.stdin.read().strip()
126 if custom_structures_source == "":
127 print(message.notice("An empty structure is entered, skipping ..."))
128 return
129
130 with open(pwndbg_custom_structure_path, "w") as f:
131 f.write(custom_structures_source)
132
133     # Avoid checking for file existence. Call the decorator wrapper directly.
134 load_custom_structure.__wrapped__(custom_structure_name, pwndbg_custom_structure_path)
135
136
137 @OnlyWhenStructFileExists
138 def edit_custom_structure(custom_structure_name, custom_structure_path):
139
140 # Lookup an editor to use for editing the custom structure.
141 editor_preference = os.getenv("EDITOR")
142 if not editor_preference:
143 editor_preference = os.getenv("VISUAL")
144 if not editor_preference:
145 editor_preference = "vi"
146
147 if cymbol_editor != "":
148 editor_preference = cymbol_editor
149
150 try:
151 subprocess.run(
152 [editor_preference, custom_structure_path],
153 check=True,
154 )
155 except Exception as exception:
156         print(message.error("An error occurred while opening the source file."))
157 print(message.error(f"Path to the custom structure: {custom_structure_path}"))
158 print(message.error("Please try to manually edit the structure."))
159 print(
160 message.error(
161 '\nTry to set a path to an editor with:\n\tset "cymbol-editor" /usr/bin/nano'
162 )
163 )
164 return
165
166 input(message.notice("Press enter when finished editing."))
167
168 load_custom_structure(custom_structure_name)
169
170
171 @OnlyWhenStructFileExists
172 def remove_custom_structure(custom_structure_name, custom_structure_path):
173 unload_loaded_symbol(custom_structure_name)
174 os.remove(custom_structure_path)
175 print(message.success("Symbols are removed!"))
176
177
178 @OnlyWhenStructFileExists
179 def load_custom_structure(custom_structure_name, custom_structure_path):
180 unload_loaded_symbol(custom_structure_name)
181 pwndbg_debug_symbols_output_file = generate_debug_symbols(custom_structure_path)
182 if not pwndbg_debug_symbols_output_file:
183 return # generate_debug_symbols prints on failures
184 gdb.execute(f"add-symbol-file {pwndbg_debug_symbols_output_file}", to_string=True)
185 loaded_symbols[custom_structure_name] = pwndbg_debug_symbols_output_file
186 print(message.success("Symbols are loaded!"))
187
188
189 @OnlyWhenStructFileExists
190 def show_custom_structure(custom_structure_name, custom_structure_path):
191 # Call wrapper .func() to avoid memoization.
192 highlighted_source = pwndbg.pwndbg.commands.context.get_highlight_source.func(
193 custom_structure_path
194 )
195 print("\n".join(highlighted_source))
196
197
198 parser = argparse.ArgumentParser(
199 description="Add, show, load, edit, or delete custom structures in plain C"
200 )
201 parser.add_argument(
202 "-a",
203 "--add",
204 metavar="name",
205 help="Add a new custom structure",
206 default=None,
207 type=str,
208 )
209 parser.add_argument(
210 "-r",
211 "--remove",
212 metavar="name",
213 help="Remove an existing custom structure",
214 default=None,
215 type=str,
216 )
217 parser.add_argument(
218 "-e",
219 "--edit",
220 metavar="name",
221 help="Edit an existing custom structure",
222 default=None,
223 type=str,
224 )
225 parser.add_argument(
226 "-l",
227 "--load",
228 metavar="name",
229 help="Load an existing custom structure",
230 default=None,
231 type=str,
232 )
233 parser.add_argument(
234 "-s",
235 "--show",
236 metavar="name",
237 help="Show the source code of an existing custom structure",
238 default=None,
239 type=str,
240 )
241
242
243 @pwndbg.commands.ArgparsedCommand(parser)
244 @pwndbg.commands.OnlyAmd64
245 @pwndbg.commands.OnlyWhenRunning
246 def cymbol(add, remove, edit, load, show):
247 if add:
248 add_custom_structure(add)
249 elif remove:
250 remove_custom_structure(remove)
251 elif edit:
252 edit_custom_structure(edit)
253 elif load:
254 load_custom_structure(load)
255 elif show:
256 show_custom_structure(show)
257 else:
258 parser.print_help()
259
[end of pwndbg/commands/cymbol.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwndbg/commands/cymbol.py b/pwndbg/commands/cymbol.py
--- a/pwndbg/commands/cymbol.py
+++ b/pwndbg/commands/cymbol.py
@@ -181,7 +181,10 @@
pwndbg_debug_symbols_output_file = generate_debug_symbols(custom_structure_path)
if not pwndbg_debug_symbols_output_file:
return # generate_debug_symbols prints on failures
- gdb.execute(f"add-symbol-file {pwndbg_debug_symbols_output_file}", to_string=True)
+ # Old GDB versions (e.g. 8.2) requires addr argument in add-symbol-file
+ # we set that address to which to load the symbols to 0 since it doesn't matter here
+ # (because we are only loading types information)
+ gdb.execute(f"add-symbol-file {pwndbg_debug_symbols_output_file} 0", to_string=True)
loaded_symbols[custom_structure_name] = pwndbg_debug_symbols_output_file
print(message.success("Symbols are loaded!"))
| {"golden_diff": "diff --git a/pwndbg/commands/cymbol.py b/pwndbg/commands/cymbol.py\n--- a/pwndbg/commands/cymbol.py\n+++ b/pwndbg/commands/cymbol.py\n@@ -181,7 +181,10 @@\n pwndbg_debug_symbols_output_file = generate_debug_symbols(custom_structure_path)\n if not pwndbg_debug_symbols_output_file:\n return # generate_debug_symbols prints on failures\n- gdb.execute(f\"add-symbol-file {pwndbg_debug_symbols_output_file}\", to_string=True)\n+ # Old GDB versions (e.g. 8.2) requires addr argument in add-symbol-file\n+ # we set that address to which to load the symbols to 0 since it doesn't matter here\n+ # (because we are only loading types information)\n+ gdb.execute(f\"add-symbol-file {pwndbg_debug_symbols_output_file} 0\", to_string=True)\n loaded_symbols[custom_structure_name] = pwndbg_debug_symbols_output_file\n print(message.success(\"Symbols are loaded!\"))\n", "issue": "Fix cymbol command on Ubuntu 18.04\nThere's a cymbol test that fails on the Ubuntu 18.04 CI, this issue is there to track it\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAdd, load, show, edit, or delete symbols for custom structures.\n\nFor the generation of the symbols g++/gcc is being used under the hood.\n\nIn case of remote debugging a binary which is not native to your architecture it\nis advised to configure the 'gcc-config-path' config parameter to your own cross-platform\ngnu gcc compiled toolchain for your target architecture.\n\nYou are advised to configure the 'cymbol-editor' config parameter to the path of your\nfavorite text editor. Otherwise cymbol exapnds $EDITOR and $VISUAL environment variables\nto find the path to the default text editor.\n\"\"\"\n\nimport argparse\nimport functools\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nimport gdb\n\nimport pwndbg\nimport pwndbg.commands\nimport pwndbg.gdblib.arch\nimport pwndbg.lib.gcc\nimport pwndbg.lib.tempfile\nfrom pwndbg.color import message\n\ngcc_compiler_path = pwndbg.gdblib.config.add_param(\n \"gcc-compiler-path\",\n \"\",\n \"Path to the gcc/g++ toolchain for generating imported symbols\",\n)\n\ncymbol_editor = pwndbg.gdblib.config.add_param(\n \"cymbol-editor\", \"\", \"Path to the editor for editing custom structures\"\n)\n\n# Remeber loaded symbols. 
This would be useful for 'remove-symbol-file'.\nloaded_symbols = {}\n\n# Where generated symbol source files are saved.\npwndbg_cachedir = pwndbg.lib.tempfile.cachedir(\"custom-symbols\")\n\n\ndef unload_loaded_symbol(custom_structure_name):\n custom_structure_symbols_file = loaded_symbols.get(custom_structure_name)\n if custom_structure_symbols_file is not None:\n gdb.execute(f\"remove-symbol-file {custom_structure_symbols_file}\")\n loaded_symbols.pop(custom_structure_name)\n\n\ndef OnlyWhenStructFileExists(func):\n @functools.wraps(func)\n def wrapper(custom_structure_name):\n pwndbg_custom_structure_path = os.path.join(pwndbg_cachedir, custom_structure_name) + \".c\"\n if not os.path.exists(pwndbg_custom_structure_path):\n print(message.error(\"No custom structure was found with the given name!\"))\n return\n return func(custom_structure_name, pwndbg_custom_structure_path)\n\n return wrapper\n\n\ndef generate_debug_symbols(custom_structure_path, pwndbg_debug_symbols_output_file=None):\n if not pwndbg_debug_symbols_output_file:\n _, pwndbg_debug_symbols_output_file = tempfile.mkstemp(prefix=\"custom-\", suffix=\".dbg\")\n\n # -fno-eliminate-unused-debug-types is a handy gcc flag that lets us extract debug symbols from non-used defined structures.\n gcc_extra_flags = [\n custom_structure_path,\n \"-c\",\n \"-g\",\n \"-fno-eliminate-unused-debug-types\",\n \"-o\",\n pwndbg_debug_symbols_output_file,\n ]\n\n # TODO: implement remote debugging support.\n gcc_flags = pwndbg.lib.gcc.which(pwndbg.gdblib.arch)\n if gcc_compiler_path != \"\":\n gcc_flags[0] = gcc_compiler_path\n\n gcc_cmd = gcc_flags + gcc_extra_flags\n\n try:\n subprocess.run(gcc_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)\n except subprocess.CalledProcessError as exception:\n print(message.error(exception))\n print(\n message.error(\n \"Failed to compile the .c file with custom structures. Please fix any compilation errors there may be.\"\n )\n )\n return None\n except Exception as exception:\n print(message.error(exception))\n print(message.error(\"An error occured while generating the debug symbols.\"))\n return None\n\n return pwndbg_debug_symbols_output_file\n\n\ndef add_custom_structure(custom_structure_name):\n pwndbg_custom_structure_path = os.path.join(pwndbg_cachedir, custom_structure_name) + \".c\"\n\n if os.path.exists(pwndbg_custom_structure_path):\n option = input(\n message.notice(\n \"A custom structure was found with the given name, would you like to overwrite it? [y/n] \"\n )\n )\n if option != \"y\":\n return\n\n print(\n message.notice(\"Enter your custom structure in a C header style, press Ctrl+D to save:\\n\")\n )\n\n custom_structures_source = sys.stdin.read().strip()\n if custom_structures_source == \"\":\n print(message.notice(\"An empty structure is entered, skipping ...\"))\n return\n\n with open(pwndbg_custom_structure_path, \"w\") as f:\n f.write(custom_structures_source)\n\n # Avoid checking for file existance. 
Call the decorator wrapper directly.\n load_custom_structure.__wrapped__(custom_structure_name, pwndbg_custom_structure_path)\n\n\n@OnlyWhenStructFileExists\ndef edit_custom_structure(custom_structure_name, custom_structure_path):\n\n # Lookup an editor to use for editing the custom structure.\n editor_preference = os.getenv(\"EDITOR\")\n if not editor_preference:\n editor_preference = os.getenv(\"VISUAL\")\n if not editor_preference:\n editor_preference = \"vi\"\n\n if cymbol_editor != \"\":\n editor_preference = cymbol_editor\n\n try:\n subprocess.run(\n [editor_preference, custom_structure_path],\n check=True,\n )\n except Exception as exception:\n print(message.error(\"An error occured during opening the source file.\"))\n print(message.error(f\"Path to the custom structure: {custom_structure_path}\"))\n print(message.error(\"Please try to manually edit the structure.\"))\n print(\n message.error(\n '\\nTry to set a path to an editor with:\\n\\tset \"cymbol-editor\" /usr/bin/nano'\n )\n )\n return\n\n input(message.notice(\"Press enter when finished editing.\"))\n\n load_custom_structure(custom_structure_name)\n\n\n@OnlyWhenStructFileExists\ndef remove_custom_structure(custom_structure_name, custom_structure_path):\n unload_loaded_symbol(custom_structure_name)\n os.remove(custom_structure_path)\n print(message.success(\"Symbols are removed!\"))\n\n\n@OnlyWhenStructFileExists\ndef load_custom_structure(custom_structure_name, custom_structure_path):\n unload_loaded_symbol(custom_structure_name)\n pwndbg_debug_symbols_output_file = generate_debug_symbols(custom_structure_path)\n if not pwndbg_debug_symbols_output_file:\n return # generate_debug_symbols prints on failures\n gdb.execute(f\"add-symbol-file {pwndbg_debug_symbols_output_file}\", to_string=True)\n loaded_symbols[custom_structure_name] = pwndbg_debug_symbols_output_file\n print(message.success(\"Symbols are loaded!\"))\n\n\n@OnlyWhenStructFileExists\ndef show_custom_structure(custom_structure_name, custom_structure_path):\n # Call wrapper .func() to avoid memoization.\n highlighted_source = pwndbg.pwndbg.commands.context.get_highlight_source.func(\n custom_structure_path\n )\n print(\"\\n\".join(highlighted_source))\n\n\nparser = argparse.ArgumentParser(\n description=\"Add, show, load, edit, or delete custom structures in plain C\"\n)\nparser.add_argument(\n \"-a\",\n \"--add\",\n metavar=\"name\",\n help=\"Add a new custom structure\",\n default=None,\n type=str,\n)\nparser.add_argument(\n \"-r\",\n \"--remove\",\n metavar=\"name\",\n help=\"Remove an existing custom structure\",\n default=None,\n type=str,\n)\nparser.add_argument(\n \"-e\",\n \"--edit\",\n metavar=\"name\",\n help=\"Edit an existing custom structure\",\n default=None,\n type=str,\n)\nparser.add_argument(\n \"-l\",\n \"--load\",\n metavar=\"name\",\n help=\"Load an existing custom structure\",\n default=None,\n type=str,\n)\nparser.add_argument(\n \"-s\",\n \"--show\",\n metavar=\"name\",\n help=\"Show the source code of an existing custom structure\",\n default=None,\n type=str,\n)\n\n\[email protected](parser)\[email protected]\[email protected]\ndef cymbol(add, remove, edit, load, show):\n if add:\n add_custom_structure(add)\n elif remove:\n remove_custom_structure(remove)\n elif edit:\n edit_custom_structure(edit)\n elif load:\n load_custom_structure(load)\n elif show:\n show_custom_structure(show)\n else:\n parser.print_help()\n", "path": "pwndbg/commands/cymbol.py"}]} | 3,037 | 230 |
gh_patches_debug_7509 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-3095 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: 'coroutine' object has no attribute 'add_done_callback'
### Which version of dd-trace-py are you using?
ddtrace 0.57.1
### Which version of pip are you using?
pip 21.3.1
### Which version of the libraries are you using?
aioredis 1.3.1
### What is the result that you get?
```
File "aioredis/commands/list.py", line 97, in lrange
return self.execute(b'LRANGE', key, start, stop, encoding=encoding)
File "ddtrace/contrib/aioredis/patch.py", line 147, in traced_13_execute_command
task.add_done_callback(_finish_span)
```
### What is the result that you expected?
No errors
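For context, `add_done_callback` is only available on asyncio Tasks/Futures, not on bare coroutine objects, which is what the traceback above is pointing at. A minimal, self-contained illustration of the difference (the names here are made up and are not part of ddtrace or aioredis):
```
import asyncio

async def fetch():
    # Stands in for a library call that hands back a bare coroutine.
    return 42

async def main():
    coro = fetch()
    # coro.add_done_callback(...) would fail with:
    # AttributeError: 'coroutine' object has no attribute 'add_done_callback'
    task = asyncio.ensure_future(coro)  # wrapping in a Task provides the callback API
    task.add_done_callback(lambda fut: print("done:", fut.result()))
    await task

asyncio.run(main())
```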
</issue>
<code>
[start of ddtrace/contrib/aioredis/patch.py]
1 import sys
2
3 import aioredis
4
5 from ddtrace import config
6 from ddtrace.internal.utils.wrappers import unwrap as _u
7 from ddtrace.pin import Pin
8 from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
9
10 from .. import trace_utils
11 from ...constants import ANALYTICS_SAMPLE_RATE_KEY
12 from ...constants import SPAN_MEASURED_KEY
13 from ...ext import SpanTypes
14 from ...ext import net
15 from ...ext import redis as redisx
16 from ..redis.util import _trace_redis_cmd
17 from ..redis.util import _trace_redis_execute_pipeline
18 from ..redis.util import format_command_args
19
20
21 try:
22 from aioredis.commands.transaction import _RedisBuffer
23 except ImportError:
24 _RedisBuffer = None
25
26 config._add("aioredis", dict(_default_service="redis"))
27
28 aioredis_version_str = getattr(aioredis, "__version__", "0.0.0")
29 aioredis_version = tuple([int(i) for i in aioredis_version_str.split(".")])
30
31
32 def patch():
33 if getattr(aioredis, "_datadog_patch", False):
34 return
35 setattr(aioredis, "_datadog_patch", True)
36 pin = Pin()
37 if aioredis_version >= (2, 0):
38 _w("aioredis.client", "Redis.execute_command", traced_execute_command)
39 _w("aioredis.client", "Redis.pipeline", traced_pipeline)
40 _w("aioredis.client", "Pipeline.execute", traced_execute_pipeline)
41 pin.onto(aioredis.client.Redis)
42 else:
43 _w("aioredis", "Redis.execute", traced_13_execute_command)
44 _w("aioredis", "Redis.pipeline", traced_13_pipeline)
45 _w("aioredis.commands.transaction", "Pipeline.execute", traced_13_execute_pipeline)
46 pin.onto(aioredis.Redis)
47
48
49 def unpatch():
50 if not getattr(aioredis, "_datadog_patch", False):
51 return
52
53 setattr(aioredis, "_datadog_patch", False)
54 if aioredis_version >= (2, 0):
55 _u(aioredis.client.Redis, "execute_command")
56 _u(aioredis.client.Redis, "pipeline")
57 _u(aioredis.client.Pipeline, "execute")
58 else:
59 _u(aioredis.Redis, "execute")
60 _u(aioredis.Redis, "pipeline")
61 _u(aioredis.commands.transaction.Pipeline, "execute")
62
63
64 async def traced_execute_command(func, instance, args, kwargs):
65 pin = Pin.get_from(instance)
66 if not pin or not pin.enabled():
67 return await func(*args, **kwargs)
68
69 with _trace_redis_cmd(pin, config.aioredis, instance, args):
70 return await func(*args, **kwargs)
71
72
73 def traced_pipeline(func, instance, args, kwargs):
74 pipeline = func(*args, **kwargs)
75 pin = Pin.get_from(instance)
76 if pin:
77 pin.onto(pipeline)
78 return pipeline
79
80
81 async def traced_execute_pipeline(func, instance, args, kwargs):
82 pin = Pin.get_from(instance)
83 if not pin or not pin.enabled():
84 return await func(*args, **kwargs)
85
86 cmds = [format_command_args(c) for c, _ in instance.command_stack]
87 resource = "\n".join(cmds)
88 with _trace_redis_execute_pipeline(pin, config.aioredis, resource, instance):
89 return await func(*args, **kwargs)
90
91
92 def traced_13_pipeline(func, instance, args, kwargs):
93 pipeline = func(*args, **kwargs)
94 pin = Pin.get_from(instance)
95 if pin:
96 pin.onto(pipeline)
97 return pipeline
98
99
100 def traced_13_execute_command(func, instance, args, kwargs):
101 # If we have a _RedisBuffer then we are in a pipeline
102 if isinstance(instance.connection, _RedisBuffer):
103 return func(*args, **kwargs)
104
105 pin = Pin.get_from(instance)
106 if not pin or not pin.enabled():
107 return func(*args, **kwargs)
108
109 # Don't activate the span since this operation is performed as a future which concludes sometime later on in
110 # execution so subsequent operations in the stack are not necessarily semantically related
111 # (we don't want this span to be the parent of all other spans created before the future is resolved)
112 span = pin.tracer.start_span(
113 redisx.CMD, service=trace_utils.ext_service(pin, config.aioredis), span_type=SpanTypes.REDIS, activate=False
114 )
115
116 span.set_tag(SPAN_MEASURED_KEY)
117 query = format_command_args(args)
118 span.resource = query
119 span.set_tag(redisx.RAWCMD, query)
120 if pin.tags:
121 span.set_tags(pin.tags)
122
123 span.set_tags(
124 {
125 net.TARGET_HOST: instance.address[0],
126 net.TARGET_PORT: instance.address[1],
127 redisx.DB: instance.db or 0,
128 }
129 )
130 span.set_metric(redisx.ARGS_LEN, len(args))
131 # set analytics sample rate if enabled
132 span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())
133
134 def _finish_span(future):
135 try:
136 # Accessing the result will raise an exception if:
137 # - The future was cancelled
138 # - There was an error executing the future (`future.exception()`)
139 # - The future is in an invalid state
140 future.result()
141 except Exception:
142 span.set_exc_info(*sys.exc_info())
143 finally:
144 span.finish()
145
146 task = func(*args, **kwargs)
147 task.add_done_callback(_finish_span)
148 return task
149
150
151 async def traced_13_execute_pipeline(func, instance, args, kwargs):
152 pin = Pin.get_from(instance)
153 if not pin or not pin.enabled():
154 return await func(*args, **kwargs)
155
156 cmds = []
157 for _, cmd, cmd_args, _ in instance._pipeline:
158 parts = [cmd]
159 parts.extend(cmd_args)
160 cmds.append(format_command_args(parts))
161 resource = "\n".join(cmds)
162 with pin.tracer.trace(
163 redisx.CMD,
164 resource=resource,
165 service=trace_utils.ext_service(pin, config.aioredis),
166 span_type=SpanTypes.REDIS,
167 ) as span:
168
169 span.set_tags(
170 {
171 net.TARGET_HOST: instance._pool_or_conn.address[0],
172 net.TARGET_PORT: instance._pool_or_conn.address[1],
173 redisx.DB: instance._pool_or_conn.db or 0,
174 }
175 )
176
177 span.set_tag(SPAN_MEASURED_KEY)
178 span.set_tag(redisx.RAWCMD, resource)
179 span.set_metric(redisx.PIPELINE_LEN, len(instance._pipeline))
180 # set analytics sample rate if enabled
181 span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())
182
183 return await func(*args, **kwargs)
184
[end of ddtrace/contrib/aioredis/patch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddtrace/contrib/aioredis/patch.py b/ddtrace/contrib/aioredis/patch.py
--- a/ddtrace/contrib/aioredis/patch.py
+++ b/ddtrace/contrib/aioredis/patch.py
@@ -1,3 +1,4 @@
+import asyncio
import sys
import aioredis
@@ -144,6 +145,9 @@
span.finish()
task = func(*args, **kwargs)
+ # Execute command returns a coroutine when no free connections are available
+ # https://github.com/aio-libs/aioredis-py/blob/v1.3.1/aioredis/pool.py#L191
+ task = asyncio.ensure_future(task)
task.add_done_callback(_finish_span)
return task
| {"golden_diff": "diff --git a/ddtrace/contrib/aioredis/patch.py b/ddtrace/contrib/aioredis/patch.py\n--- a/ddtrace/contrib/aioredis/patch.py\n+++ b/ddtrace/contrib/aioredis/patch.py\n@@ -1,3 +1,4 @@\n+import asyncio\n import sys\n \n import aioredis\n@@ -144,6 +145,9 @@\n span.finish()\n \n task = func(*args, **kwargs)\n+ # Execute command returns a coroutine when no free connections are available\n+ # https://github.com/aio-libs/aioredis-py/blob/v1.3.1/aioredis/pool.py#L191\n+ task = asyncio.ensure_future(task)\n task.add_done_callback(_finish_span)\n return task\n", "issue": "AttributeError: 'coroutine' object has no attribute 'add_done_callback'\n\r\n### Which version of dd-trace-py are you using?\r\n\r\nddtrace 0.57.1\r\n\r\n### Which version of pip are you using?\r\n\r\npip 21.3.1\r\n\r\n### Which version of the libraries are you using?\r\n\r\naioredis 1.3.1\r\n\r\n### What is the result that you get?\r\n\r\n```\r\n File \"aioredis/commands/list.py\", line 97, in lrange\r\n return self.execute(b'LRANGE', key, start, stop, encoding=encoding)\r\n File \"ddtrace/contrib/aioredis/patch.py\", line 147, in traced_13_execute_command\r\n task.add_done_callback(_finish_span)\r\n```\r\n\r\n### What is the result that you expected?\r\n\r\nNo errors\r\n\r\n\n", "before_files": [{"content": "import sys\n\nimport aioredis\n\nfrom ddtrace import config\nfrom ddtrace.internal.utils.wrappers import unwrap as _u\nfrom ddtrace.pin import Pin\nfrom ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n\nfrom .. import trace_utils\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ...constants import SPAN_MEASURED_KEY\nfrom ...ext import SpanTypes\nfrom ...ext import net\nfrom ...ext import redis as redisx\nfrom ..redis.util import _trace_redis_cmd\nfrom ..redis.util import _trace_redis_execute_pipeline\nfrom ..redis.util import format_command_args\n\n\ntry:\n from aioredis.commands.transaction import _RedisBuffer\nexcept ImportError:\n _RedisBuffer = None\n\nconfig._add(\"aioredis\", dict(_default_service=\"redis\"))\n\naioredis_version_str = getattr(aioredis, \"__version__\", \"0.0.0\")\naioredis_version = tuple([int(i) for i in aioredis_version_str.split(\".\")])\n\n\ndef patch():\n if getattr(aioredis, \"_datadog_patch\", False):\n return\n setattr(aioredis, \"_datadog_patch\", True)\n pin = Pin()\n if aioredis_version >= (2, 0):\n _w(\"aioredis.client\", \"Redis.execute_command\", traced_execute_command)\n _w(\"aioredis.client\", \"Redis.pipeline\", traced_pipeline)\n _w(\"aioredis.client\", \"Pipeline.execute\", traced_execute_pipeline)\n pin.onto(aioredis.client.Redis)\n else:\n _w(\"aioredis\", \"Redis.execute\", traced_13_execute_command)\n _w(\"aioredis\", \"Redis.pipeline\", traced_13_pipeline)\n _w(\"aioredis.commands.transaction\", \"Pipeline.execute\", traced_13_execute_pipeline)\n pin.onto(aioredis.Redis)\n\n\ndef unpatch():\n if not getattr(aioredis, \"_datadog_patch\", False):\n return\n\n setattr(aioredis, \"_datadog_patch\", False)\n if aioredis_version >= (2, 0):\n _u(aioredis.client.Redis, \"execute_command\")\n _u(aioredis.client.Redis, \"pipeline\")\n _u(aioredis.client.Pipeline, \"execute\")\n else:\n _u(aioredis.Redis, \"execute\")\n _u(aioredis.Redis, \"pipeline\")\n _u(aioredis.commands.transaction.Pipeline, \"execute\")\n\n\nasync def traced_execute_command(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n with _trace_redis_cmd(pin, config.aioredis, instance, args):\n return 
await func(*args, **kwargs)\n\n\ndef traced_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\nasync def traced_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n cmds = [format_command_args(c) for c, _ in instance.command_stack]\n resource = \"\\n\".join(cmds)\n with _trace_redis_execute_pipeline(pin, config.aioredis, resource, instance):\n return await func(*args, **kwargs)\n\n\ndef traced_13_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\ndef traced_13_execute_command(func, instance, args, kwargs):\n # If we have a _RedisBuffer then we are in a pipeline\n if isinstance(instance.connection, _RedisBuffer):\n return func(*args, **kwargs)\n\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n # Don't activate the span since this operation is performed as a future which concludes sometime later on in\n # execution so subsequent operations in the stack are not necessarily semantically related\n # (we don't want this span to be the parent of all other spans created before the future is resolved)\n span = pin.tracer.start_span(\n redisx.CMD, service=trace_utils.ext_service(pin, config.aioredis), span_type=SpanTypes.REDIS, activate=False\n )\n\n span.set_tag(SPAN_MEASURED_KEY)\n query = format_command_args(args)\n span.resource = query\n span.set_tag(redisx.RAWCMD, query)\n if pin.tags:\n span.set_tags(pin.tags)\n\n span.set_tags(\n {\n net.TARGET_HOST: instance.address[0],\n net.TARGET_PORT: instance.address[1],\n redisx.DB: instance.db or 0,\n }\n )\n span.set_metric(redisx.ARGS_LEN, len(args))\n # set analytics sample rate if enabled\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())\n\n def _finish_span(future):\n try:\n # Accessing the result will raise an exception if:\n # - The future was cancelled\n # - There was an error executing the future (`future.exception()`)\n # - The future is in an invalid state\n future.result()\n except Exception:\n span.set_exc_info(*sys.exc_info())\n finally:\n span.finish()\n\n task = func(*args, **kwargs)\n task.add_done_callback(_finish_span)\n return task\n\n\nasync def traced_13_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n cmds = []\n for _, cmd, cmd_args, _ in instance._pipeline:\n parts = [cmd]\n parts.extend(cmd_args)\n cmds.append(format_command_args(parts))\n resource = \"\\n\".join(cmds)\n with pin.tracer.trace(\n redisx.CMD,\n resource=resource,\n service=trace_utils.ext_service(pin, config.aioredis),\n span_type=SpanTypes.REDIS,\n ) as span:\n\n span.set_tags(\n {\n net.TARGET_HOST: instance._pool_or_conn.address[0],\n net.TARGET_PORT: instance._pool_or_conn.address[1],\n redisx.DB: instance._pool_or_conn.db or 0,\n }\n )\n\n span.set_tag(SPAN_MEASURED_KEY)\n span.set_tag(redisx.RAWCMD, resource)\n span.set_metric(redisx.PIPELINE_LEN, len(instance._pipeline))\n # set analytics sample rate if enabled\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())\n\n return await func(*args, **kwargs)\n", "path": "ddtrace/contrib/aioredis/patch.py"}]} | 2,687 | 181 |
gh_patches_debug_23684 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-4964 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Addons: `running` is invoked twice.
##### Steps to reproduce the problem:
1. Write an addon that implements `running()`
2. Running is invoked twice on startup.
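A minimal addon that shows the behaviour (a sketch; the file name is arbitrary and any script exposing a `running()` hook should do):
```
from mitmproxy import ctx

class RunningProbe:
    def running(self):
        ctx.log.info("running() hook fired")  # logged twice at startup

addons = [RunningProbe()]
```
Load it with `mitmproxy -s running_probe.py` and watch the event log.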
</issue>
<code>
[start of mitmproxy/addons/script.py]
1 import asyncio
2 import os
3 import importlib.util
4 import importlib.machinery
5 import sys
6 import types
7 import typing
8 import traceback
9
10 from mitmproxy import addonmanager, hooks
11 from mitmproxy import exceptions
12 from mitmproxy import flow
13 from mitmproxy import command
14 from mitmproxy import eventsequence
15 from mitmproxy import ctx
16 import mitmproxy.types as mtypes
17
18
19 def load_script(path: str) -> typing.Optional[types.ModuleType]:
20 fullname = "__mitmproxy_script__.{}".format(
21 os.path.splitext(os.path.basename(path))[0]
22 )
23 # the fullname is not unique among scripts, so if there already is an existing script with said
24 # fullname, remove it.
25 sys.modules.pop(fullname, None)
26 oldpath = sys.path
27 sys.path.insert(0, os.path.dirname(path))
28 m = None
29 try:
30 loader = importlib.machinery.SourceFileLoader(fullname, path)
31 spec = importlib.util.spec_from_loader(fullname, loader=loader)
32 assert spec
33 m = importlib.util.module_from_spec(spec)
34 loader.exec_module(m)
35 if not getattr(m, "name", None):
36 m.name = path # type: ignore
37 except Exception as e:
38 script_error_handler(path, e, msg=str(e))
39 finally:
40 sys.path[:] = oldpath
41 return m
42
43
44 def script_error_handler(path, exc, msg="", tb=False):
45 """
46 Handles all the user's script errors with
47 an optional traceback
48 """
49 exception = type(exc).__name__
50 if msg:
51 exception = msg
52 lineno = ""
53 if hasattr(exc, "lineno"):
54 lineno = str(exc.lineno)
55 log_msg = f"in script {path}:{lineno} {exception}"
56 if tb:
57 etype, value, tback = sys.exc_info()
58 tback = addonmanager.cut_traceback(tback, "invoke_addon")
59 log_msg = log_msg + "\n" + "".join(traceback.format_exception(etype, value, tback))
60 ctx.log.error(log_msg)
61
62
63 ReloadInterval = 1
64
65
66 class Script:
67 """
68 An addon that manages a single script.
69 """
70
71 def __init__(self, path: str, reload: bool) -> None:
72 self.name = "scriptmanager:" + path
73 self.path = path
74 self.fullpath = os.path.expanduser(
75 path.strip("'\" ")
76 )
77 self.ns = None
78
79 if not os.path.isfile(self.fullpath):
80 raise exceptions.OptionsError('No such script')
81
82 self.reloadtask = None
83 if reload:
84 self.reloadtask = asyncio.ensure_future(self.watcher())
85 else:
86 self.loadscript()
87
88 def done(self):
89 if self.reloadtask:
90 self.reloadtask.cancel()
91
92 @property
93 def addons(self):
94 return [self.ns] if self.ns else []
95
96 def loadscript(self):
97 ctx.log.info("Loading script %s" % self.path)
98 if self.ns:
99 ctx.master.addons.remove(self.ns)
100 self.ns = None
101 with addonmanager.safecall():
102 ns = load_script(self.fullpath)
103 ctx.master.addons.register(ns)
104 self.ns = ns
105 if self.ns:
106 # We're already running, so we have to explicitly register and
107 # configure the addon
108 ctx.master.addons.invoke_addon(self.ns, hooks.RunningHook())
109 try:
110 ctx.master.addons.invoke_addon(
111 self.ns,
112 hooks.ConfigureHook(ctx.options.keys())
113 )
114 except exceptions.OptionsError as e:
115 script_error_handler(self.fullpath, e, msg=str(e))
116
117 async def watcher(self):
118 last_mtime = 0
119 while True:
120 try:
121 mtime = os.stat(self.fullpath).st_mtime
122 except FileNotFoundError:
123 ctx.log.info("Removing script %s" % self.path)
124 scripts = list(ctx.options.scripts)
125 scripts.remove(self.path)
126 ctx.options.update(scripts=scripts)
127 return
128 if mtime > last_mtime:
129 self.loadscript()
130 last_mtime = mtime
131 await asyncio.sleep(ReloadInterval)
132
133
134 class ScriptLoader:
135 """
136 An addon that manages loading scripts from options.
137 """
138 def __init__(self):
139 self.is_running = False
140 self.addons = []
141
142 def load(self, loader):
143 loader.add_option(
144 "scripts", typing.Sequence[str], [],
145 "Execute a script."
146 )
147
148 def running(self):
149 self.is_running = True
150
151 @command.command("script.run")
152 def script_run(self, flows: typing.Sequence[flow.Flow], path: mtypes.Path) -> None:
153 """
154 Run a script on the specified flows. The script is configured with
155 the current options and all lifecycle events for each flow are
156 simulated. Note that the load event is not invoked.
157 """
158 if not os.path.isfile(path):
159 ctx.log.error('No such script: %s' % path)
160 return
161 mod = load_script(path)
162 if mod:
163 with addonmanager.safecall():
164 ctx.master.addons.invoke_addon(mod, hooks.RunningHook())
165 ctx.master.addons.invoke_addon(
166 mod,
167 hooks.ConfigureHook(ctx.options.keys()),
168 )
169 for f in flows:
170 for evt in eventsequence.iterate(f):
171 ctx.master.addons.invoke_addon(mod, evt)
172
173 def configure(self, updated):
174 if "scripts" in updated:
175 for s in ctx.options.scripts:
176 if ctx.options.scripts.count(s) > 1:
177 raise exceptions.OptionsError("Duplicate script")
178
179 for a in self.addons[:]:
180 if a.path not in ctx.options.scripts:
181 ctx.log.info("Un-loading script: %s" % a.path)
182 ctx.master.addons.remove(a)
183 self.addons.remove(a)
184
185 # The machinations below are to ensure that:
186 # - Scripts remain in the same order
187 # - Scripts are not initialized un-necessarily. If only a
188 # script's order in the script list has changed, it is just
189 # moved.
190
191 current = {}
192 for a in self.addons:
193 current[a.path] = a
194
195 ordered = []
196 newscripts = []
197 for s in ctx.options.scripts:
198 if s in current:
199 ordered.append(current[s])
200 else:
201 sc = Script(s, True)
202 ordered.append(sc)
203 newscripts.append(sc)
204
205 self.addons = ordered
206
207 for s in newscripts:
208 ctx.master.addons.register(s)
209 if self.is_running:
210 # If we're already running, we configure and tell the addon
211 # we're up and running.
212 ctx.master.addons.invoke_addon(s, hooks.RunningHook())
213
[end of mitmproxy/addons/script.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitmproxy/addons/script.py b/mitmproxy/addons/script.py
--- a/mitmproxy/addons/script.py
+++ b/mitmproxy/addons/script.py
@@ -75,6 +75,7 @@
path.strip("'\" ")
)
self.ns = None
+ self.is_running = False
if not os.path.isfile(self.fullpath):
raise exceptions.OptionsError('No such script')
@@ -85,6 +86,9 @@
else:
self.loadscript()
+ def running(self):
+ self.is_running = True
+
def done(self):
if self.reloadtask:
self.reloadtask.cancel()
@@ -105,7 +109,8 @@
if self.ns:
# We're already running, so we have to explicitly register and
# configure the addon
- ctx.master.addons.invoke_addon(self.ns, hooks.RunningHook())
+ if self.is_running:
+ ctx.master.addons.invoke_addon(self.ns, hooks.RunningHook())
try:
ctx.master.addons.invoke_addon(
self.ns,
| {"golden_diff": "diff --git a/mitmproxy/addons/script.py b/mitmproxy/addons/script.py\n--- a/mitmproxy/addons/script.py\n+++ b/mitmproxy/addons/script.py\n@@ -75,6 +75,7 @@\n path.strip(\"'\\\" \")\n )\n self.ns = None\n+ self.is_running = False\n \n if not os.path.isfile(self.fullpath):\n raise exceptions.OptionsError('No such script')\n@@ -85,6 +86,9 @@\n else:\n self.loadscript()\n \n+ def running(self):\n+ self.is_running = True\n+\n def done(self):\n if self.reloadtask:\n self.reloadtask.cancel()\n@@ -105,7 +109,8 @@\n if self.ns:\n # We're already running, so we have to explicitly register and\n # configure the addon\n- ctx.master.addons.invoke_addon(self.ns, hooks.RunningHook())\n+ if self.is_running:\n+ ctx.master.addons.invoke_addon(self.ns, hooks.RunningHook())\n try:\n ctx.master.addons.invoke_addon(\n self.ns,\n", "issue": "Addons: `running` is invoked twice.\n##### Steps to reproduce the problem:\r\n\r\n1. Write an addon that implements `running()`\r\n2. Running is invoked twice on startup.\r\n\nAddons: `running` is invoked twice.\n##### Steps to reproduce the problem:\r\n\r\n1. Write an addon that implements `running()`\r\n2. Running is invoked twice on startup.\r\n\n", "before_files": [{"content": "import asyncio\nimport os\nimport importlib.util\nimport importlib.machinery\nimport sys\nimport types\nimport typing\nimport traceback\n\nfrom mitmproxy import addonmanager, hooks\nfrom mitmproxy import exceptions\nfrom mitmproxy import flow\nfrom mitmproxy import command\nfrom mitmproxy import eventsequence\nfrom mitmproxy import ctx\nimport mitmproxy.types as mtypes\n\n\ndef load_script(path: str) -> typing.Optional[types.ModuleType]:\n fullname = \"__mitmproxy_script__.{}\".format(\n os.path.splitext(os.path.basename(path))[0]\n )\n # the fullname is not unique among scripts, so if there already is an existing script with said\n # fullname, remove it.\n sys.modules.pop(fullname, None)\n oldpath = sys.path\n sys.path.insert(0, os.path.dirname(path))\n m = None\n try:\n loader = importlib.machinery.SourceFileLoader(fullname, path)\n spec = importlib.util.spec_from_loader(fullname, loader=loader)\n assert spec\n m = importlib.util.module_from_spec(spec)\n loader.exec_module(m)\n if not getattr(m, \"name\", None):\n m.name = path # type: ignore\n except Exception as e:\n script_error_handler(path, e, msg=str(e))\n finally:\n sys.path[:] = oldpath\n return m\n\n\ndef script_error_handler(path, exc, msg=\"\", tb=False):\n \"\"\"\n Handles all the user's script errors with\n an optional traceback\n \"\"\"\n exception = type(exc).__name__\n if msg:\n exception = msg\n lineno = \"\"\n if hasattr(exc, \"lineno\"):\n lineno = str(exc.lineno)\n log_msg = f\"in script {path}:{lineno} {exception}\"\n if tb:\n etype, value, tback = sys.exc_info()\n tback = addonmanager.cut_traceback(tback, \"invoke_addon\")\n log_msg = log_msg + \"\\n\" + \"\".join(traceback.format_exception(etype, value, tback))\n ctx.log.error(log_msg)\n\n\nReloadInterval = 1\n\n\nclass Script:\n \"\"\"\n An addon that manages a single script.\n \"\"\"\n\n def __init__(self, path: str, reload: bool) -> None:\n self.name = \"scriptmanager:\" + path\n self.path = path\n self.fullpath = os.path.expanduser(\n path.strip(\"'\\\" \")\n )\n self.ns = None\n\n if not os.path.isfile(self.fullpath):\n raise exceptions.OptionsError('No such script')\n\n self.reloadtask = None\n if reload:\n self.reloadtask = asyncio.ensure_future(self.watcher())\n else:\n self.loadscript()\n\n def done(self):\n if self.reloadtask:\n 
self.reloadtask.cancel()\n\n @property\n def addons(self):\n return [self.ns] if self.ns else []\n\n def loadscript(self):\n ctx.log.info(\"Loading script %s\" % self.path)\n if self.ns:\n ctx.master.addons.remove(self.ns)\n self.ns = None\n with addonmanager.safecall():\n ns = load_script(self.fullpath)\n ctx.master.addons.register(ns)\n self.ns = ns\n if self.ns:\n # We're already running, so we have to explicitly register and\n # configure the addon\n ctx.master.addons.invoke_addon(self.ns, hooks.RunningHook())\n try:\n ctx.master.addons.invoke_addon(\n self.ns,\n hooks.ConfigureHook(ctx.options.keys())\n )\n except exceptions.OptionsError as e:\n script_error_handler(self.fullpath, e, msg=str(e))\n\n async def watcher(self):\n last_mtime = 0\n while True:\n try:\n mtime = os.stat(self.fullpath).st_mtime\n except FileNotFoundError:\n ctx.log.info(\"Removing script %s\" % self.path)\n scripts = list(ctx.options.scripts)\n scripts.remove(self.path)\n ctx.options.update(scripts=scripts)\n return\n if mtime > last_mtime:\n self.loadscript()\n last_mtime = mtime\n await asyncio.sleep(ReloadInterval)\n\n\nclass ScriptLoader:\n \"\"\"\n An addon that manages loading scripts from options.\n \"\"\"\n def __init__(self):\n self.is_running = False\n self.addons = []\n\n def load(self, loader):\n loader.add_option(\n \"scripts\", typing.Sequence[str], [],\n \"Execute a script.\"\n )\n\n def running(self):\n self.is_running = True\n\n @command.command(\"script.run\")\n def script_run(self, flows: typing.Sequence[flow.Flow], path: mtypes.Path) -> None:\n \"\"\"\n Run a script on the specified flows. The script is configured with\n the current options and all lifecycle events for each flow are\n simulated. Note that the load event is not invoked.\n \"\"\"\n if not os.path.isfile(path):\n ctx.log.error('No such script: %s' % path)\n return\n mod = load_script(path)\n if mod:\n with addonmanager.safecall():\n ctx.master.addons.invoke_addon(mod, hooks.RunningHook())\n ctx.master.addons.invoke_addon(\n mod,\n hooks.ConfigureHook(ctx.options.keys()),\n )\n for f in flows:\n for evt in eventsequence.iterate(f):\n ctx.master.addons.invoke_addon(mod, evt)\n\n def configure(self, updated):\n if \"scripts\" in updated:\n for s in ctx.options.scripts:\n if ctx.options.scripts.count(s) > 1:\n raise exceptions.OptionsError(\"Duplicate script\")\n\n for a in self.addons[:]:\n if a.path not in ctx.options.scripts:\n ctx.log.info(\"Un-loading script: %s\" % a.path)\n ctx.master.addons.remove(a)\n self.addons.remove(a)\n\n # The machinations below are to ensure that:\n # - Scripts remain in the same order\n # - Scripts are not initialized un-necessarily. If only a\n # script's order in the script list has changed, it is just\n # moved.\n\n current = {}\n for a in self.addons:\n current[a.path] = a\n\n ordered = []\n newscripts = []\n for s in ctx.options.scripts:\n if s in current:\n ordered.append(current[s])\n else:\n sc = Script(s, True)\n ordered.append(sc)\n newscripts.append(sc)\n\n self.addons = ordered\n\n for s in newscripts:\n ctx.master.addons.register(s)\n if self.is_running:\n # If we're already running, we configure and tell the addon\n # we're up and running.\n ctx.master.addons.invoke_addon(s, hooks.RunningHook())\n", "path": "mitmproxy/addons/script.py"}]} | 2,617 | 244 |
gh_patches_debug_26174 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-776 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove botan from our library
according to [this](https://github.com/botanio/sdk#py), botan has its own implementation for Python. No need to reinvent the wheel. I suggest we remove it from ptb in the next major (8.0) version.
</issue>
<code>
[start of telegram/contrib/__init__.py]
1 from .botan import Botan
2
3 __all__ = ['Botan']
4
[end of telegram/contrib/__init__.py]
[start of telegram/contrib/botan.py]
1 import logging
2
3 from future.moves.urllib.parse import quote
4 from future.moves.urllib.error import HTTPError, URLError
5 from future.moves.urllib.request import urlopen, Request
6
7 logging.getLogger(__name__).addHandler(logging.NullHandler())
8
9
10 class Botan(object):
11 """This class helps to send incoming events to your botan analytics account.
12 See more: https://github.com/botanio/sdk#botan-sdk
13 """
14
15 token = ''
16 url_template = 'https://api.botan.io/track?token={token}' \
17 '&uid={uid}&name={name}&src=python-telegram-bot'
18
19 def __init__(self, token):
20 self.token = token
21 self.logger = logging.getLogger(__name__)
22
23 def track(self, message, event_name='event'):
24 try:
25 uid = message.chat_id
26 except AttributeError:
27 self.logger.warn('No chat_id in message')
28 return False
29 data = message.to_json()
30 try:
31 url = self.url_template.format(
32 token=str(self.token), uid=str(uid), name=quote(event_name))
33 request = Request(
34 url, data=data.encode(), headers={'Content-Type': 'application/json'})
35 urlopen(request)
36 return True
37 except HTTPError as error:
38 self.logger.warn('Botan track error ' + str(error.code) + ':' + error.read().decode(
39 'utf-8'))
40 return False
41 except URLError as error:
42 self.logger.warn('Botan track error ' + str(error.reason))
43 return False
44
[end of telegram/contrib/botan.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/telegram/contrib/__init__.py b/telegram/contrib/__init__.py
deleted file mode 100644
--- a/telegram/contrib/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from .botan import Botan
-
-__all__ = ['Botan']
diff --git a/telegram/contrib/botan.py b/telegram/contrib/botan.py
deleted file mode 100644
--- a/telegram/contrib/botan.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import logging
-
-from future.moves.urllib.parse import quote
-from future.moves.urllib.error import HTTPError, URLError
-from future.moves.urllib.request import urlopen, Request
-
-logging.getLogger(__name__).addHandler(logging.NullHandler())
-
-
-class Botan(object):
- """This class helps to send incoming events to your botan analytics account.
- See more: https://github.com/botanio/sdk#botan-sdk
- """
-
- token = ''
- url_template = 'https://api.botan.io/track?token={token}' \
- '&uid={uid}&name={name}&src=python-telegram-bot'
-
- def __init__(self, token):
- self.token = token
- self.logger = logging.getLogger(__name__)
-
- def track(self, message, event_name='event'):
- try:
- uid = message.chat_id
- except AttributeError:
- self.logger.warn('No chat_id in message')
- return False
- data = message.to_json()
- try:
- url = self.url_template.format(
- token=str(self.token), uid=str(uid), name=quote(event_name))
- request = Request(
- url, data=data.encode(), headers={'Content-Type': 'application/json'})
- urlopen(request)
- return True
- except HTTPError as error:
- self.logger.warn('Botan track error ' + str(error.code) + ':' + error.read().decode(
- 'utf-8'))
- return False
- except URLError as error:
- self.logger.warn('Botan track error ' + str(error.reason))
- return False
| {"golden_diff": "diff --git a/telegram/contrib/__init__.py b/telegram/contrib/__init__.py\ndeleted file mode 100644\n--- a/telegram/contrib/__init__.py\n+++ /dev/null\n@@ -1,3 +0,0 @@\n-from .botan import Botan\n-\n-__all__ = ['Botan']\ndiff --git a/telegram/contrib/botan.py b/telegram/contrib/botan.py\ndeleted file mode 100644\n--- a/telegram/contrib/botan.py\n+++ /dev/null\n@@ -1,43 +0,0 @@\n-import logging\n-\n-from future.moves.urllib.parse import quote\n-from future.moves.urllib.error import HTTPError, URLError\n-from future.moves.urllib.request import urlopen, Request\n-\n-logging.getLogger(__name__).addHandler(logging.NullHandler())\n-\n-\n-class Botan(object):\n- \"\"\"This class helps to send incoming events to your botan analytics account.\n- See more: https://github.com/botanio/sdk#botan-sdk\n- \"\"\"\n-\n- token = ''\n- url_template = 'https://api.botan.io/track?token={token}' \\\n- '&uid={uid}&name={name}&src=python-telegram-bot'\n-\n- def __init__(self, token):\n- self.token = token\n- self.logger = logging.getLogger(__name__)\n-\n- def track(self, message, event_name='event'):\n- try:\n- uid = message.chat_id\n- except AttributeError:\n- self.logger.warn('No chat_id in message')\n- return False\n- data = message.to_json()\n- try:\n- url = self.url_template.format(\n- token=str(self.token), uid=str(uid), name=quote(event_name))\n- request = Request(\n- url, data=data.encode(), headers={'Content-Type': 'application/json'})\n- urlopen(request)\n- return True\n- except HTTPError as error:\n- self.logger.warn('Botan track error ' + str(error.code) + ':' + error.read().decode(\n- 'utf-8'))\n- return False\n- except URLError as error:\n- self.logger.warn('Botan track error ' + str(error.reason))\n- return False\n", "issue": "Remove botan from our library\naccording to [this](https://github.com/botanio/sdk#py) botan has it's own implementation for python. No need to reinvent the wheel. I suggest we remove it from ptb in the next major (8.0) version.\n", "before_files": [{"content": "from .botan import Botan\n\n__all__ = ['Botan']\n", "path": "telegram/contrib/__init__.py"}, {"content": "import logging\n\nfrom future.moves.urllib.parse import quote\nfrom future.moves.urllib.error import HTTPError, URLError\nfrom future.moves.urllib.request import urlopen, Request\n\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n\n\nclass Botan(object):\n \"\"\"This class helps to send incoming events to your botan analytics account.\n See more: https://github.com/botanio/sdk#botan-sdk\n \"\"\"\n\n token = ''\n url_template = 'https://api.botan.io/track?token={token}' \\\n '&uid={uid}&name={name}&src=python-telegram-bot'\n\n def __init__(self, token):\n self.token = token\n self.logger = logging.getLogger(__name__)\n\n def track(self, message, event_name='event'):\n try:\n uid = message.chat_id\n except AttributeError:\n self.logger.warn('No chat_id in message')\n return False\n data = message.to_json()\n try:\n url = self.url_template.format(\n token=str(self.token), uid=str(uid), name=quote(event_name))\n request = Request(\n url, data=data.encode(), headers={'Content-Type': 'application/json'})\n urlopen(request)\n return True\n except HTTPError as error:\n self.logger.warn('Botan track error ' + str(error.code) + ':' + error.read().decode(\n 'utf-8'))\n return False\n except URLError as error:\n self.logger.warn('Botan track error ' + str(error.reason))\n return False\n", "path": "telegram/contrib/botan.py"}]} | 1,050 | 498 |
gh_patches_debug_5151 | rasdani/github-patches | git_diff | pyca__cryptography-4289 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Raise clearer error message when passing a unicode value as a symmetric key
Python: 2.7.3
cryptography==2.1.4
cffi==1.11.5
pip 9.0.1
```
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
import os
iv = os.urandom(16)
cipher = Cipher(algorithms.AES(key), modes.CFB8(iv),
                backend=default_backend())
decryptor = cipher.decryptor()
raw_text = decryptor.update(encoded_text) + decryptor.finalize()
```
If `key` is unicode, it will raise an error:
```
/env/local/lib/python2.7/site-packages/cryptography/hazmat/primitives/ciphers/base.pyc in decryptor(self)
125 def decryptor(self):
126 ctx = self._backend.create_symmetric_decryption_ctx(
--> 127 self.algorithm, self.mode
128 )
129 return self._wrap_ctx(ctx, encrypt=False)
/env/local/lib/python2.7/site-packages/cryptography/hazmat/backends/openssl/backend.pyc in create_symmetric_decryption_ctx(self, cipher, mode)
270
271 def create_symmetric_decryption_ctx(self, cipher, mode):
--> 272 return _CipherContext(self, cipher, mode, _CipherContext._DECRYPT)
273
274 def pbkdf2_hmac_supported(self, algorithm):
/env/local/lib/python2.7/site-packages/cryptography/hazmat/backends/openssl/ciphers.pyc in __init__(self, backend, cipher, mode, operation)
108 cipher.key,
109 iv_nonce,
--> 110 operation
111 )
112 self._backend.openssl_assert(res != 0)
```
I have this temporary fix:
```key = key.encode('utf-8') if isinstance(key, unicode) else key```
But should we do this in `backends/openssl/ciphers.py`?
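The check being discussed amounts to rejecting text keys up front so the failure is readable, rather than letting it surface inside the CFFI/OpenSSL layer. A rough sketch of such a guard (illustrative only, not the actual cryptography code):
```
def _require_bytes_key(key):
    # Fail fast with a clear message instead of a low-level error later on.
    if not isinstance(key, bytes):
        raise TypeError("key must be bytes, got %s" % type(key).__name__)
    return key

key = _require_bytes_key(b"\x00" * 32)
```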
</issue>
<code>
[start of src/cryptography/hazmat/primitives/ciphers/algorithms.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 from cryptography import utils
8 from cryptography.hazmat.primitives.ciphers import (
9 BlockCipherAlgorithm, CipherAlgorithm
10 )
11 from cryptography.hazmat.primitives.ciphers.modes import ModeWithNonce
12
13
14 def _verify_key_size(algorithm, key):
15 # Verify that the key size matches the expected key size
16 if len(key) * 8 not in algorithm.key_sizes:
17 raise ValueError("Invalid key size ({0}) for {1}.".format(
18 len(key) * 8, algorithm.name
19 ))
20 return key
21
22
23 @utils.register_interface(BlockCipherAlgorithm)
24 @utils.register_interface(CipherAlgorithm)
25 class AES(object):
26 name = "AES"
27 block_size = 128
28 # 512 added to support AES-256-XTS, which uses 512-bit keys
29 key_sizes = frozenset([128, 192, 256, 512])
30
31 def __init__(self, key):
32 self.key = _verify_key_size(self, key)
33
34 @property
35 def key_size(self):
36 return len(self.key) * 8
37
38
39 @utils.register_interface(BlockCipherAlgorithm)
40 @utils.register_interface(CipherAlgorithm)
41 class Camellia(object):
42 name = "camellia"
43 block_size = 128
44 key_sizes = frozenset([128, 192, 256])
45
46 def __init__(self, key):
47 self.key = _verify_key_size(self, key)
48
49 @property
50 def key_size(self):
51 return len(self.key) * 8
52
53
54 @utils.register_interface(BlockCipherAlgorithm)
55 @utils.register_interface(CipherAlgorithm)
56 class TripleDES(object):
57 name = "3DES"
58 block_size = 64
59 key_sizes = frozenset([64, 128, 192])
60
61 def __init__(self, key):
62 if len(key) == 8:
63 key += key + key
64 elif len(key) == 16:
65 key += key[:8]
66 self.key = _verify_key_size(self, key)
67
68 @property
69 def key_size(self):
70 return len(self.key) * 8
71
72
73 @utils.register_interface(BlockCipherAlgorithm)
74 @utils.register_interface(CipherAlgorithm)
75 class Blowfish(object):
76 name = "Blowfish"
77 block_size = 64
78 key_sizes = frozenset(range(32, 449, 8))
79
80 def __init__(self, key):
81 self.key = _verify_key_size(self, key)
82
83 @property
84 def key_size(self):
85 return len(self.key) * 8
86
87
88 @utils.register_interface(BlockCipherAlgorithm)
89 @utils.register_interface(CipherAlgorithm)
90 class CAST5(object):
91 name = "CAST5"
92 block_size = 64
93 key_sizes = frozenset(range(40, 129, 8))
94
95 def __init__(self, key):
96 self.key = _verify_key_size(self, key)
97
98 @property
99 def key_size(self):
100 return len(self.key) * 8
101
102
103 @utils.register_interface(CipherAlgorithm)
104 class ARC4(object):
105 name = "RC4"
106 key_sizes = frozenset([40, 56, 64, 80, 128, 160, 192, 256])
107
108 def __init__(self, key):
109 self.key = _verify_key_size(self, key)
110
111 @property
112 def key_size(self):
113 return len(self.key) * 8
114
115
116 @utils.register_interface(CipherAlgorithm)
117 class IDEA(object):
118 name = "IDEA"
119 block_size = 64
120 key_sizes = frozenset([128])
121
122 def __init__(self, key):
123 self.key = _verify_key_size(self, key)
124
125 @property
126 def key_size(self):
127 return len(self.key) * 8
128
129
130 @utils.register_interface(BlockCipherAlgorithm)
131 @utils.register_interface(CipherAlgorithm)
132 class SEED(object):
133 name = "SEED"
134 block_size = 128
135 key_sizes = frozenset([128])
136
137 def __init__(self, key):
138 self.key = _verify_key_size(self, key)
139
140 @property
141 def key_size(self):
142 return len(self.key) * 8
143
144
145 @utils.register_interface(CipherAlgorithm)
146 @utils.register_interface(ModeWithNonce)
147 class ChaCha20(object):
148 name = "ChaCha20"
149 key_sizes = frozenset([256])
150
151 def __init__(self, key, nonce):
152 self.key = _verify_key_size(self, key)
153 if not isinstance(nonce, bytes):
154 raise TypeError("nonce must be bytes")
155
156 if len(nonce) != 16:
157 raise ValueError("nonce must be 128-bits (16 bytes)")
158
159 self._nonce = nonce
160
161 nonce = utils.read_only_property("_nonce")
162
163 @property
164 def key_size(self):
165 return len(self.key) * 8
166
[end of src/cryptography/hazmat/primitives/ciphers/algorithms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/hazmat/primitives/ciphers/algorithms.py b/src/cryptography/hazmat/primitives/ciphers/algorithms.py
--- a/src/cryptography/hazmat/primitives/ciphers/algorithms.py
+++ b/src/cryptography/hazmat/primitives/ciphers/algorithms.py
@@ -12,6 +12,9 @@
def _verify_key_size(algorithm, key):
+ # Verify that the key is instance of bytes
+ utils._check_bytes("key", key)
+
# Verify that the key size matches the expected key size
if len(key) * 8 not in algorithm.key_sizes:
raise ValueError("Invalid key size ({0}) for {1}.".format(
| {"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/ciphers/algorithms.py b/src/cryptography/hazmat/primitives/ciphers/algorithms.py\n--- a/src/cryptography/hazmat/primitives/ciphers/algorithms.py\n+++ b/src/cryptography/hazmat/primitives/ciphers/algorithms.py\n@@ -12,6 +12,9 @@\n \n \n def _verify_key_size(algorithm, key):\n+ # Verify that the key is instance of bytes\n+ utils._check_bytes(\"key\", key)\n+\n # Verify that the key size matches the expected key size\n if len(key) * 8 not in algorithm.key_sizes:\n raise ValueError(\"Invalid key size ({0}) for {1}.\".format(\n", "issue": "Raise clearer error message when passing a unicode value as a symmetric key\nPython: 2.7.3\r\ncryptography==2.1.4\r\ncffi==1.11.5\r\npip 9.0.1\r\n\r\n```\r\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\r\nfrom cryptography.hazmat.backends import default_backend\r\nimport os\r\n\r\niv = os.urandom(16)\r\ncipher = Cipher(algorithms.AES(key), modes.CFB8(iv),\r\n backend=default_backend())\r\ndecryptor = cipher.decryptor()\r\nraw_text = decryptor.update(encoded_text) + decryptor.finalize()\r\n```\r\n\r\nIf `key` is unicode it will raise error.\r\n```\r\n\r\n/env/local/lib/python2.7/site-packages/cryptography/hazmat/primitives/ciphers/base.pyc in decryptor(self)\r\n 125 def decryptor(self):\r\n 126 ctx = self._backend.create_symmetric_decryption_ctx(\r\n--> 127 self.algorithm, self.mode\r\n 128 )\r\n 129 return self._wrap_ctx(ctx, encrypt=False)\r\n\r\n/env/local/lib/python2.7/site-packages/cryptography/hazmat/backends/openssl/backend.pyc in create_symmetric_decryption_ctx(self, cipher, mode)\r\n 270 \r\n 271 def create_symmetric_decryption_ctx(self, cipher, mode):\r\n--> 272 return _CipherContext(self, cipher, mode, _CipherContext._DECRYPT)\r\n 273 \r\n 274 def pbkdf2_hmac_supported(self, algorithm):\r\n\r\n/env/local/lib/python2.7/site-packages/cryptography/hazmat/backends/openssl/ciphers.pyc in __init__(self, backend, cipher, mode, operation)\r\n 108 cipher.key,\r\n 109 iv_nonce,\r\n--> 110 operation\r\n 111 )\r\n 112 self._backend.openssl_assert(res != 0)\r\n``` \r\n\r\nI have this fix temporary\r\n```key = key.encode('utf-8') if isinstance(key, unicode) else key```\r\nBut should we do thi in `backends/openssl/ciphers.py` ?\r\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom cryptography import utils\nfrom cryptography.hazmat.primitives.ciphers import (\n BlockCipherAlgorithm, CipherAlgorithm\n)\nfrom cryptography.hazmat.primitives.ciphers.modes import ModeWithNonce\n\n\ndef _verify_key_size(algorithm, key):\n # Verify that the key size matches the expected key size\n if len(key) * 8 not in algorithm.key_sizes:\n raise ValueError(\"Invalid key size ({0}) for {1}.\".format(\n len(key) * 8, algorithm.name\n ))\n return key\n\n\[email protected]_interface(BlockCipherAlgorithm)\[email protected]_interface(CipherAlgorithm)\nclass AES(object):\n name = \"AES\"\n block_size = 128\n # 512 added to support AES-256-XTS, which uses 512-bit keys\n key_sizes = frozenset([128, 192, 256, 512])\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(BlockCipherAlgorithm)\[email protected]_interface(CipherAlgorithm)\nclass Camellia(object):\n name = \"camellia\"\n block_size = 128\n key_sizes = frozenset([128, 192, 256])\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(BlockCipherAlgorithm)\[email protected]_interface(CipherAlgorithm)\nclass TripleDES(object):\n name = \"3DES\"\n block_size = 64\n key_sizes = frozenset([64, 128, 192])\n\n def __init__(self, key):\n if len(key) == 8:\n key += key + key\n elif len(key) == 16:\n key += key[:8]\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(BlockCipherAlgorithm)\[email protected]_interface(CipherAlgorithm)\nclass Blowfish(object):\n name = \"Blowfish\"\n block_size = 64\n key_sizes = frozenset(range(32, 449, 8))\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(BlockCipherAlgorithm)\[email protected]_interface(CipherAlgorithm)\nclass CAST5(object):\n name = \"CAST5\"\n block_size = 64\n key_sizes = frozenset(range(40, 129, 8))\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(CipherAlgorithm)\nclass ARC4(object):\n name = \"RC4\"\n key_sizes = frozenset([40, 56, 64, 80, 128, 160, 192, 256])\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(CipherAlgorithm)\nclass IDEA(object):\n name = \"IDEA\"\n block_size = 64\n key_sizes = frozenset([128])\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(BlockCipherAlgorithm)\[email protected]_interface(CipherAlgorithm)\nclass SEED(object):\n name = \"SEED\"\n block_size = 128\n key_sizes = frozenset([128])\n\n def __init__(self, key):\n self.key = _verify_key_size(self, key)\n\n @property\n def key_size(self):\n return len(self.key) * 8\n\n\[email protected]_interface(CipherAlgorithm)\[email protected]_interface(ModeWithNonce)\nclass ChaCha20(object):\n name = \"ChaCha20\"\n key_sizes = frozenset([256])\n\n def __init__(self, key, nonce):\n self.key = _verify_key_size(self, key)\n if 
not isinstance(nonce, bytes):\n raise TypeError(\"nonce must be bytes\")\n\n if len(nonce) != 16:\n raise ValueError(\"nonce must be 128-bits (16 bytes)\")\n\n self._nonce = nonce\n\n nonce = utils.read_only_property(\"_nonce\")\n\n @property\n def key_size(self):\n return len(self.key) * 8\n", "path": "src/cryptography/hazmat/primitives/ciphers/algorithms.py"}]} | 2,611 | 155 |
gh_patches_debug_6367 | rasdani/github-patches | git_diff | MongoEngine__mongoengine-1769 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
raise StopIteration in generators is deprecated
```
...
/Users/Vic/projects/brain2/brain/utils/user_progress.py:686: DeprecationWarning: generator 'QuerySet._iter_results' raised StopIteration
for action in actions:
/Users/Vic/projects/brain2/brain/utils/user_course_state.py:317: DeprecationWarning: generator 'QuerySet._iter_results' raised StopIteration
PlayedTask(
/Users/Vic/projects/brain2/venv/lib/python3.6/site-packages/mongoengine/dereference.py:33: DeprecationWarning: generator 'QuerySet._iter_results' raised StopIteration
...
```
https://stackoverflow.com/questions/14183803/in-pythons-generators-what-is-the-difference-between-raise-stopiteration-and
https://github.com/MongoEngine/mongoengine/blob/5bdd35464b89dd2def1dcb0464e244322f1bc757/mongoengine/queryset/queryset.py#L95
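Under PEP 479 the compliant way to end a generator is a plain `return`; raising StopIteration inside the generator body is what triggers the warning. A small self-contained example of the pattern (not the mongoengine code itself):
```
import itertools

def chunked(iterable, size):
    it = iter(iterable)
    while True:
        chunk = list(itertools.islice(it, size))
        if not chunk:
            return  # PEP 479: end the generator with return, not raise StopIteration
        yield chunk

print(list(chunked(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]
```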
</issue>
<code>
[start of mongoengine/queryset/queryset.py]
1 import six
2
3 from mongoengine.errors import OperationError
4 from mongoengine.queryset.base import (BaseQuerySet, CASCADE, DENY, DO_NOTHING,
5 NULLIFY, PULL)
6
7 __all__ = ('QuerySet', 'QuerySetNoCache', 'DO_NOTHING', 'NULLIFY', 'CASCADE',
8 'DENY', 'PULL')
9
10 # The maximum number of items to display in a QuerySet.__repr__
11 REPR_OUTPUT_SIZE = 20
12 ITER_CHUNK_SIZE = 100
13
14
15 class QuerySet(BaseQuerySet):
16 """The default queryset, that builds queries and handles a set of results
17 returned from a query.
18
19 Wraps a MongoDB cursor, providing :class:`~mongoengine.Document` objects as
20 the results.
21 """
22
23 _has_more = True
24 _len = None
25 _result_cache = None
26
27 def __iter__(self):
28 """Iteration utilises a results cache which iterates the cursor
29 in batches of ``ITER_CHUNK_SIZE``.
30
31 If ``self._has_more`` the cursor hasn't been exhausted so cache then
32 batch. Otherwise iterate the result_cache.
33 """
34 self._iter = True
35
36 if self._has_more:
37 return self._iter_results()
38
39 # iterating over the cache.
40 return iter(self._result_cache)
41
42 def __len__(self):
43 """Since __len__ is called quite frequently (for example, as part of
44 list(qs)), we populate the result cache and cache the length.
45 """
46 if self._len is not None:
47 return self._len
48
49 # Populate the result cache with *all* of the docs in the cursor
50 if self._has_more:
51 list(self._iter_results())
52
53 # Cache the length of the complete result cache and return it
54 self._len = len(self._result_cache)
55 return self._len
56
57 def __repr__(self):
58 """Provide a string representation of the QuerySet"""
59 if self._iter:
60 return '.. queryset mid-iteration ..'
61
62 self._populate_cache()
63 data = self._result_cache[:REPR_OUTPUT_SIZE + 1]
64 if len(data) > REPR_OUTPUT_SIZE:
65 data[-1] = '...(remaining elements truncated)...'
66 return repr(data)
67
68 def _iter_results(self):
69 """A generator for iterating over the result cache.
70
71 Also populates the cache if there are more possible results to
72 yield. Raises StopIteration when there are no more results.
73 """
74 if self._result_cache is None:
75 self._result_cache = []
76
77 pos = 0
78 while True:
79
80 # For all positions lower than the length of the current result
81 # cache, serve the docs straight from the cache w/o hitting the
82 # database.
83 # XXX it's VERY important to compute the len within the `while`
84 # condition because the result cache might expand mid-iteration
85 # (e.g. if we call len(qs) inside a loop that iterates over the
86 # queryset). Fortunately len(list) is O(1) in Python, so this
87 # doesn't cause performance issues.
88 while pos < len(self._result_cache):
89 yield self._result_cache[pos]
90 pos += 1
91
92 # Raise StopIteration if we already established there were no more
93 # docs in the db cursor.
94 if not self._has_more:
95 raise StopIteration
96
97 # Otherwise, populate more of the cache and repeat.
98 if len(self._result_cache) <= pos:
99 self._populate_cache()
100
101 def _populate_cache(self):
102 """
103 Populates the result cache with ``ITER_CHUNK_SIZE`` more entries
104 (until the cursor is exhausted).
105 """
106 if self._result_cache is None:
107 self._result_cache = []
108
109 # Skip populating the cache if we already established there are no
110 # more docs to pull from the database.
111 if not self._has_more:
112 return
113
114 # Pull in ITER_CHUNK_SIZE docs from the database and store them in
115 # the result cache.
116 try:
117 for _ in six.moves.range(ITER_CHUNK_SIZE):
118 self._result_cache.append(self.next())
119 except StopIteration:
120 # Getting this exception means there are no more docs in the
121 # db cursor. Set _has_more to False so that we can use that
122 # information in other places.
123 self._has_more = False
124
125 def count(self, with_limit_and_skip=False):
126 """Count the selected elements in the query.
127
128 :param with_limit_and_skip (optional): take any :meth:`limit` or
129 :meth:`skip` that has been applied to this cursor into account when
130 getting the count
131 """
132 if with_limit_and_skip is False:
133 return super(QuerySet, self).count(with_limit_and_skip)
134
135 if self._len is None:
136 self._len = super(QuerySet, self).count(with_limit_and_skip)
137
138 return self._len
139
140 def no_cache(self):
141 """Convert to a non-caching queryset
142
143 .. versionadded:: 0.8.3 Convert to non caching queryset
144 """
145 if self._result_cache is not None:
146 raise OperationError('QuerySet already cached')
147
148 return self._clone_into(QuerySetNoCache(self._document,
149 self._collection))
150
151
152 class QuerySetNoCache(BaseQuerySet):
153 """A non caching QuerySet"""
154
155 def cache(self):
156 """Convert to a caching queryset
157
158 .. versionadded:: 0.8.3 Convert to caching queryset
159 """
160 return self._clone_into(QuerySet(self._document, self._collection))
161
162 def __repr__(self):
163 """Provides the string representation of the QuerySet
164
165 .. versionchanged:: 0.6.13 Now doesnt modify the cursor
166 """
167 if self._iter:
168 return '.. queryset mid-iteration ..'
169
170 data = []
171 for _ in six.moves.range(REPR_OUTPUT_SIZE + 1):
172 try:
173 data.append(self.next())
174 except StopIteration:
175 break
176
177 if len(data) > REPR_OUTPUT_SIZE:
178 data[-1] = '...(remaining elements truncated)...'
179
180 self.rewind()
181 return repr(data)
182
183 def __iter__(self):
184 queryset = self
185 if queryset._iter:
186 queryset = self.clone()
187 queryset.rewind()
188 return queryset
189
190
191 class QuerySetNoDeRef(QuerySet):
192 """Special no_dereference QuerySet"""
193
194 def __dereference(items, max_depth=1, instance=None, name=None):
195 return items
196
[end of mongoengine/queryset/queryset.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mongoengine/queryset/queryset.py b/mongoengine/queryset/queryset.py
--- a/mongoengine/queryset/queryset.py
+++ b/mongoengine/queryset/queryset.py
@@ -92,7 +92,7 @@
# Raise StopIteration if we already established there were no more
# docs in the db cursor.
if not self._has_more:
- raise StopIteration
+ return
# Otherwise, populate more of the cache and repeat.
if len(self._result_cache) <= pos:
| {"golden_diff": "diff --git a/mongoengine/queryset/queryset.py b/mongoengine/queryset/queryset.py\n--- a/mongoengine/queryset/queryset.py\n+++ b/mongoengine/queryset/queryset.py\n@@ -92,7 +92,7 @@\n # Raise StopIteration if we already established there were no more\n # docs in the db cursor.\n if not self._has_more:\n- raise StopIteration\n+ return\n \n # Otherwise, populate more of the cache and repeat.\n if len(self._result_cache) <= pos:\n", "issue": "raise StopIteration in generators is deprecated\n```\r\n...\r\n/Users/Vic/projects/brain2/brain/utils/user_progress.py:686: DeprecationWarning: generator 'QuerySet._iter_results' raised StopIteration\r\n for action in actions:\r\n/Users/Vic/projects/brain2/brain/utils/user_course_state.py:317: DeprecationWarning: generator 'QuerySet._iter_results' raised StopIteration\r\n PlayedTask(\r\n/Users/Vic/projects/brain2/venv/lib/python3.6/site-packages/mongoengine/dereference.py:33: DeprecationWarning: generator 'QuerySet._iter_results' raised StopIteration\r\n...\r\n```\r\nhttps://stackoverflow.com/questions/14183803/in-pythons-generators-what-is-the-difference-between-raise-stopiteration-and\r\n\r\nhttps://github.com/MongoEngine/mongoengine/blob/5bdd35464b89dd2def1dcb0464e244322f1bc757/mongoengine/queryset/queryset.py#L95\n", "before_files": [{"content": "import six\n\nfrom mongoengine.errors import OperationError\nfrom mongoengine.queryset.base import (BaseQuerySet, CASCADE, DENY, DO_NOTHING,\n NULLIFY, PULL)\n\n__all__ = ('QuerySet', 'QuerySetNoCache', 'DO_NOTHING', 'NULLIFY', 'CASCADE',\n 'DENY', 'PULL')\n\n# The maximum number of items to display in a QuerySet.__repr__\nREPR_OUTPUT_SIZE = 20\nITER_CHUNK_SIZE = 100\n\n\nclass QuerySet(BaseQuerySet):\n \"\"\"The default queryset, that builds queries and handles a set of results\n returned from a query.\n\n Wraps a MongoDB cursor, providing :class:`~mongoengine.Document` objects as\n the results.\n \"\"\"\n\n _has_more = True\n _len = None\n _result_cache = None\n\n def __iter__(self):\n \"\"\"Iteration utilises a results cache which iterates the cursor\n in batches of ``ITER_CHUNK_SIZE``.\n\n If ``self._has_more`` the cursor hasn't been exhausted so cache then\n batch. Otherwise iterate the result_cache.\n \"\"\"\n self._iter = True\n\n if self._has_more:\n return self._iter_results()\n\n # iterating over the cache.\n return iter(self._result_cache)\n\n def __len__(self):\n \"\"\"Since __len__ is called quite frequently (for example, as part of\n list(qs)), we populate the result cache and cache the length.\n \"\"\"\n if self._len is not None:\n return self._len\n\n # Populate the result cache with *all* of the docs in the cursor\n if self._has_more:\n list(self._iter_results())\n\n # Cache the length of the complete result cache and return it\n self._len = len(self._result_cache)\n return self._len\n\n def __repr__(self):\n \"\"\"Provide a string representation of the QuerySet\"\"\"\n if self._iter:\n return '.. queryset mid-iteration ..'\n\n self._populate_cache()\n data = self._result_cache[:REPR_OUTPUT_SIZE + 1]\n if len(data) > REPR_OUTPUT_SIZE:\n data[-1] = '...(remaining elements truncated)...'\n return repr(data)\n\n def _iter_results(self):\n \"\"\"A generator for iterating over the result cache.\n\n Also populates the cache if there are more possible results to\n yield. 
Raises StopIteration when there are no more results.\n \"\"\"\n if self._result_cache is None:\n self._result_cache = []\n\n pos = 0\n while True:\n\n # For all positions lower than the length of the current result\n # cache, serve the docs straight from the cache w/o hitting the\n # database.\n # XXX it's VERY important to compute the len within the `while`\n # condition because the result cache might expand mid-iteration\n # (e.g. if we call len(qs) inside a loop that iterates over the\n # queryset). Fortunately len(list) is O(1) in Python, so this\n # doesn't cause performance issues.\n while pos < len(self._result_cache):\n yield self._result_cache[pos]\n pos += 1\n\n # Raise StopIteration if we already established there were no more\n # docs in the db cursor.\n if not self._has_more:\n raise StopIteration\n\n # Otherwise, populate more of the cache and repeat.\n if len(self._result_cache) <= pos:\n self._populate_cache()\n\n def _populate_cache(self):\n \"\"\"\n Populates the result cache with ``ITER_CHUNK_SIZE`` more entries\n (until the cursor is exhausted).\n \"\"\"\n if self._result_cache is None:\n self._result_cache = []\n\n # Skip populating the cache if we already established there are no\n # more docs to pull from the database.\n if not self._has_more:\n return\n\n # Pull in ITER_CHUNK_SIZE docs from the database and store them in\n # the result cache.\n try:\n for _ in six.moves.range(ITER_CHUNK_SIZE):\n self._result_cache.append(self.next())\n except StopIteration:\n # Getting this exception means there are no more docs in the\n # db cursor. Set _has_more to False so that we can use that\n # information in other places.\n self._has_more = False\n\n def count(self, with_limit_and_skip=False):\n \"\"\"Count the selected elements in the query.\n\n :param with_limit_and_skip (optional): take any :meth:`limit` or\n :meth:`skip` that has been applied to this cursor into account when\n getting the count\n \"\"\"\n if with_limit_and_skip is False:\n return super(QuerySet, self).count(with_limit_and_skip)\n\n if self._len is None:\n self._len = super(QuerySet, self).count(with_limit_and_skip)\n\n return self._len\n\n def no_cache(self):\n \"\"\"Convert to a non-caching queryset\n\n .. versionadded:: 0.8.3 Convert to non caching queryset\n \"\"\"\n if self._result_cache is not None:\n raise OperationError('QuerySet already cached')\n\n return self._clone_into(QuerySetNoCache(self._document,\n self._collection))\n\n\nclass QuerySetNoCache(BaseQuerySet):\n \"\"\"A non caching QuerySet\"\"\"\n\n def cache(self):\n \"\"\"Convert to a caching queryset\n\n .. versionadded:: 0.8.3 Convert to caching queryset\n \"\"\"\n return self._clone_into(QuerySet(self._document, self._collection))\n\n def __repr__(self):\n \"\"\"Provides the string representation of the QuerySet\n\n .. versionchanged:: 0.6.13 Now doesnt modify the cursor\n \"\"\"\n if self._iter:\n return '.. 
queryset mid-iteration ..'\n\n data = []\n for _ in six.moves.range(REPR_OUTPUT_SIZE + 1):\n try:\n data.append(self.next())\n except StopIteration:\n break\n\n if len(data) > REPR_OUTPUT_SIZE:\n data[-1] = '...(remaining elements truncated)...'\n\n self.rewind()\n return repr(data)\n\n def __iter__(self):\n queryset = self\n if queryset._iter:\n queryset = self.clone()\n queryset.rewind()\n return queryset\n\n\nclass QuerySetNoDeRef(QuerySet):\n \"\"\"Special no_dereference QuerySet\"\"\"\n\n def __dereference(items, max_depth=1, instance=None, name=None):\n return items\n", "path": "mongoengine/queryset/queryset.py"}]} | 2,685 | 119 |
gh_patches_debug_41711 | rasdani/github-patches | git_diff | python-poetry__poetry-5320 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Upgrading packages from private repositories fails using cert and client-cert
<!--
Hi there! Thank you for discovering and submitting an issue.
Before you submit this; let's make sure of a few things.
Please make sure the following boxes are ticked if they are correct.
If not, please try and fulfill these first.
-->
<!-- Checked checkbox should look like this: [x] -->
- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
<!--
Once those are done, if you're able to fill in the following list with your information,
it'd be very helpful to whoever handles the issue.
-->
- **Poetry version**: **1.1.4** <!-- Replace with version -->
## Issue
<!-- Now feel free to write your issue, but please be descriptive! Thanks again 🙌 ❤️ -->
To configure the cert and client-cert I am using:
poetry config certificates.private-pypi.cert /path/to/ca.crt
poetry config certificates.private-pypi.client-cert /path/to/client.pem
... from the custom CA & TLS [instructions](https://python-poetry.org/docs/repositories/#custom-certificate-authority-and-mutual-tls-authentication).
And from pyproject.toml:
[[tool.poetry.source]]
name = "private-pypi"
url = "https://private-pypi/simple"
When doing a `poetry update` to get the latest packages, it fails to get packages from private repositories.
It fails with:
Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify
failed: self signed certificate in certificate chain (_ssl.c:1091)'))
... When I started to debug that error, it looked like the cert and CA were simply not provided in the HTTP request. Here's what I saw:
A single successful request where the cert and CA were passed:
GET https://private-pypi/simple/<package_name>/
And then 5 failed retries that had the SSLCertVerificationError:
GET https://private-pypi/packages/<package_name>/<package>-py3-none-any.whl#md5=blah
And in those 5 requests it looked like the cert and CA were not provided. As a result, in my CI build I am not able to install `<package>`, which happens to be an upgrade. I believe this started to happen with 1.1.0, as I can force version 1.0.10 and then the CI build succeeds and the package installs successfully.
</issue>
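As background, the sketch below shows the two options a `requests` call needs for a custom certificate authority and mutual TLS: `verify` (the CA bundle) and `cert` (the client certificate). The URL and certificate paths are placeholders taken from the report's configuration; this is only meant to illustrate which options appear to be missing from the failing download requests, not Poetry's actual implementation.

```
# Hedged illustration only -- not Poetry code. The paths and host are the
# placeholders used in the report.
import requests

response = requests.get(
    "https://private-pypi/packages/some-package/some_package-py3-none-any.whl",
    verify="/path/to/ca.crt",    # custom certificate authority bundle
    cert="/path/to/client.pem",  # client certificate for mutual TLS
    timeout=30,
)
```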
<code>
[start of src/poetry/utils/authenticator.py]
1 from __future__ import annotations
2
3 import logging
4 import time
5 import urllib.parse
6
7 from typing import TYPE_CHECKING
8 from typing import Any
9
10 import requests
11 import requests.auth
12 import requests.exceptions
13
14 from poetry.exceptions import PoetryException
15 from poetry.utils.password_manager import PasswordManager
16
17
18 if TYPE_CHECKING:
19 from cleo.io.io import IO
20
21 from poetry.config.config import Config
22
23
24 logger = logging.getLogger()
25
26
27 class Authenticator:
28 def __init__(self, config: Config, io: IO | None = None) -> None:
29 self._config = config
30 self._io = io
31 self._session = None
32 self._credentials = {}
33 self._password_manager = PasswordManager(self._config)
34
35 def _log(self, message: str, level: str = "debug") -> None:
36 if self._io is not None:
37 self._io.write_line(f"<{level}>{message}</{level}>")
38 else:
39 getattr(logger, level, logger.debug)(message)
40
41 @property
42 def session(self) -> requests.Session:
43 if self._session is None:
44 self._session = requests.Session()
45
46 return self._session
47
48 def __del__(self) -> None:
49 if self._session is not None:
50 self._session.close()
51
52 def request(self, method: str, url: str, **kwargs: Any) -> requests.Response:
53 request = requests.Request(method, url)
54 username, password = self.get_credentials_for_url(url)
55
56 if username is not None and password is not None:
57 request = requests.auth.HTTPBasicAuth(username, password)(request)
58
59 session = self.session
60 prepared_request = session.prepare_request(request)
61
62 proxies = kwargs.get("proxies", {})
63 stream = kwargs.get("stream")
64 verify = kwargs.get("verify")
65 cert = kwargs.get("cert")
66
67 settings = session.merge_environment_settings(
68 prepared_request.url, proxies, stream, verify, cert
69 )
70
71 # Send the request.
72 send_kwargs = {
73 "timeout": kwargs.get("timeout"),
74 "allow_redirects": kwargs.get("allow_redirects", True),
75 }
76 send_kwargs.update(settings)
77
78 attempt = 0
79
80 while True:
81 is_last_attempt = attempt >= 5
82 try:
83 resp = session.send(prepared_request, **send_kwargs)
84 except (requests.exceptions.ConnectionError, OSError) as e:
85 if is_last_attempt:
86 raise e
87 else:
88 if resp.status_code not in [502, 503, 504] or is_last_attempt:
89 resp.raise_for_status()
90 return resp
91
92 if not is_last_attempt:
93 attempt += 1
94 delay = 0.5 * attempt
95 self._log(f"Retrying HTTP request in {delay} seconds.", level="debug")
96 time.sleep(delay)
97 continue
98
99 # this should never really be hit under any sane circumstance
100 raise PoetryException("Failed HTTP {} request", method.upper())
101
102 def get_credentials_for_url(self, url: str) -> tuple[str | None, str | None]:
103 parsed_url = urllib.parse.urlsplit(url)
104
105 netloc = parsed_url.netloc
106
107 credentials = self._credentials.get(netloc, (None, None))
108
109 if credentials == (None, None):
110 if "@" not in netloc:
111 credentials = self._get_credentials_for_netloc(netloc)
112 else:
113 # Split from the right because that's how urllib.parse.urlsplit()
114 # behaves if more than one @ is present (which can be checked using
115 # the password attribute of urlsplit()'s return value).
116 auth, netloc = netloc.rsplit("@", 1)
117 # Split from the left because that's how urllib.parse.urlsplit()
118 # behaves if more than one : is present (which again can be checked
119 # using the password attribute of the return value)
120 credentials = auth.split(":", 1) if ":" in auth else (auth, None)
121 credentials = tuple(
122 None if x is None else urllib.parse.unquote(x) for x in credentials
123 )
124
125 if credentials[0] is not None or credentials[1] is not None:
126 credentials = (credentials[0] or "", credentials[1] or "")
127
128 self._credentials[netloc] = credentials
129
130 return credentials[0], credentials[1]
131
132 def get_pypi_token(self, name: str) -> str:
133 return self._password_manager.get_pypi_token(name)
134
135 def get_http_auth(self, name: str) -> dict[str, str] | None:
136 return self._get_http_auth(name, None)
137
138 def _get_http_auth(self, name: str, netloc: str | None) -> dict[str, str] | None:
139 if name == "pypi":
140 url = "https://upload.pypi.org/legacy/"
141 else:
142 url = self._config.get(f"repositories.{name}.url")
143 if not url:
144 return None
145
146 parsed_url = urllib.parse.urlsplit(url)
147
148 if netloc is None or netloc == parsed_url.netloc:
149 auth = self._password_manager.get_http_auth(name)
150
151 if auth is None or auth["password"] is None:
152 username = auth["username"] if auth else None
153 auth = self._get_credentials_for_netloc_from_keyring(
154 url, parsed_url.netloc, username
155 )
156
157 return auth
158
159 def _get_credentials_for_netloc(self, netloc: str) -> tuple[str | None, str | None]:
160 for repository_name in self._config.get("repositories", []):
161 auth = self._get_http_auth(repository_name, netloc)
162
163 if auth is None:
164 continue
165
166 return auth["username"], auth["password"]
167
168 return None, None
169
170 def _get_credentials_for_netloc_from_keyring(
171 self, url: str, netloc: str, username: str | None
172 ) -> dict[str, str] | None:
173 import keyring
174
175 cred = keyring.get_credential(url, username)
176 if cred is not None:
177 return {
178 "username": cred.username,
179 "password": cred.password,
180 }
181
182 cred = keyring.get_credential(netloc, username)
183 if cred is not None:
184 return {
185 "username": cred.username,
186 "password": cred.password,
187 }
188
189 if username:
190 return {
191 "username": username,
192 "password": None,
193 }
194
195 return None
196
[end of src/poetry/utils/authenticator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/poetry/utils/authenticator.py b/src/poetry/utils/authenticator.py
--- a/src/poetry/utils/authenticator.py
+++ b/src/poetry/utils/authenticator.py
@@ -6,16 +6,21 @@
from typing import TYPE_CHECKING
from typing import Any
+from typing import Iterator
import requests
import requests.auth
import requests.exceptions
from poetry.exceptions import PoetryException
+from poetry.utils.helpers import get_cert
+from poetry.utils.helpers import get_client_cert
from poetry.utils.password_manager import PasswordManager
if TYPE_CHECKING:
+ from pathlib import Path
+
from cleo.io.io import IO
from poetry.config.config import Config
@@ -30,6 +35,7 @@
self._io = io
self._session = None
self._credentials = {}
+ self._certs = {}
self._password_manager = PasswordManager(self._config)
def _log(self, message: str, level: str = "debug") -> None:
@@ -61,8 +67,16 @@
proxies = kwargs.get("proxies", {})
stream = kwargs.get("stream")
- verify = kwargs.get("verify")
- cert = kwargs.get("cert")
+
+ certs = self.get_certs_for_url(url)
+ verify = kwargs.get("verify") or certs.get("verify")
+ cert = kwargs.get("cert") or certs.get("cert")
+
+ if cert is not None:
+ cert = str(cert)
+
+ if verify is not None:
+ verify = str(verify)
settings = session.merge_environment_settings(
prepared_request.url, proxies, stream, verify, cert
@@ -157,7 +171,7 @@
return auth
def _get_credentials_for_netloc(self, netloc: str) -> tuple[str | None, str | None]:
- for repository_name in self._config.get("repositories", []):
+ for repository_name, _ in self._get_repository_netlocs():
auth = self._get_http_auth(repository_name, netloc)
if auth is None:
@@ -167,6 +181,22 @@
return None, None
+ def get_certs_for_url(self, url: str) -> dict[str, Path | None]:
+ parsed_url = urllib.parse.urlsplit(url)
+
+ netloc = parsed_url.netloc
+
+ return self._certs.setdefault(
+ netloc,
+ self._get_certs_for_netloc_from_config(netloc),
+ )
+
+ def _get_repository_netlocs(self) -> Iterator[tuple[str, str]]:
+ for repository_name in self._config.get("repositories", []):
+ url = self._config.get(f"repositories.{repository_name}.url")
+ parsed_url = urllib.parse.urlsplit(url)
+ yield repository_name, parsed_url.netloc
+
def _get_credentials_for_netloc_from_keyring(
self, url: str, netloc: str, username: str | None
) -> dict[str, str] | None:
@@ -193,3 +223,14 @@
}
return None
+
+ def _get_certs_for_netloc_from_config(self, netloc: str) -> dict[str, Path | None]:
+ certs = {"cert": None, "verify": None}
+
+ for repository_name, repository_netloc in self._get_repository_netlocs():
+ if netloc == repository_netloc:
+ certs["cert"] = get_client_cert(self._config, repository_name)
+ certs["verify"] = get_cert(self._config, repository_name)
+ break
+
+ return certs
| {"golden_diff": "diff --git a/src/poetry/utils/authenticator.py b/src/poetry/utils/authenticator.py\n--- a/src/poetry/utils/authenticator.py\n+++ b/src/poetry/utils/authenticator.py\n@@ -6,16 +6,21 @@\n \n from typing import TYPE_CHECKING\n from typing import Any\n+from typing import Iterator\n \n import requests\n import requests.auth\n import requests.exceptions\n \n from poetry.exceptions import PoetryException\n+from poetry.utils.helpers import get_cert\n+from poetry.utils.helpers import get_client_cert\n from poetry.utils.password_manager import PasswordManager\n \n \n if TYPE_CHECKING:\n+ from pathlib import Path\n+\n from cleo.io.io import IO\n \n from poetry.config.config import Config\n@@ -30,6 +35,7 @@\n self._io = io\n self._session = None\n self._credentials = {}\n+ self._certs = {}\n self._password_manager = PasswordManager(self._config)\n \n def _log(self, message: str, level: str = \"debug\") -> None:\n@@ -61,8 +67,16 @@\n \n proxies = kwargs.get(\"proxies\", {})\n stream = kwargs.get(\"stream\")\n- verify = kwargs.get(\"verify\")\n- cert = kwargs.get(\"cert\")\n+\n+ certs = self.get_certs_for_url(url)\n+ verify = kwargs.get(\"verify\") or certs.get(\"verify\")\n+ cert = kwargs.get(\"cert\") or certs.get(\"cert\")\n+\n+ if cert is not None:\n+ cert = str(cert)\n+\n+ if verify is not None:\n+ verify = str(verify)\n \n settings = session.merge_environment_settings(\n prepared_request.url, proxies, stream, verify, cert\n@@ -157,7 +171,7 @@\n return auth\n \n def _get_credentials_for_netloc(self, netloc: str) -> tuple[str | None, str | None]:\n- for repository_name in self._config.get(\"repositories\", []):\n+ for repository_name, _ in self._get_repository_netlocs():\n auth = self._get_http_auth(repository_name, netloc)\n \n if auth is None:\n@@ -167,6 +181,22 @@\n \n return None, None\n \n+ def get_certs_for_url(self, url: str) -> dict[str, Path | None]:\n+ parsed_url = urllib.parse.urlsplit(url)\n+\n+ netloc = parsed_url.netloc\n+\n+ return self._certs.setdefault(\n+ netloc,\n+ self._get_certs_for_netloc_from_config(netloc),\n+ )\n+\n+ def _get_repository_netlocs(self) -> Iterator[tuple[str, str]]:\n+ for repository_name in self._config.get(\"repositories\", []):\n+ url = self._config.get(f\"repositories.{repository_name}.url\")\n+ parsed_url = urllib.parse.urlsplit(url)\n+ yield repository_name, parsed_url.netloc\n+\n def _get_credentials_for_netloc_from_keyring(\n self, url: str, netloc: str, username: str | None\n ) -> dict[str, str] | None:\n@@ -193,3 +223,14 @@\n }\n \n return None\n+\n+ def _get_certs_for_netloc_from_config(self, netloc: str) -> dict[str, Path | None]:\n+ certs = {\"cert\": None, \"verify\": None}\n+\n+ for repository_name, repository_netloc in self._get_repository_netlocs():\n+ if netloc == repository_netloc:\n+ certs[\"cert\"] = get_client_cert(self._config, repository_name)\n+ certs[\"verify\"] = get_cert(self._config, repository_name)\n+ break\n+\n+ return certs\n", "issue": "Upgrading packages from private repositories fails using cert and client-cert \n<!--\r\n Hi there! 
Thank you for discovering and submitting an issue.\r\n\r\n Before you submit this; let's make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill these first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n<!--\r\n Once those are done, if you're able to fill in the following list with your information,\r\n it'd be very helpful to whoever handles the issue.\r\n-->\r\n\r\n- **Poetry version**: **1.1.4** <!-- Replace with version -->\r\n\r\n## Issue\r\n<!-- Now feel free to write your issue, but please be descriptive! Thanks again \ud83d\ude4c \u2764\ufe0f -->\r\n\r\nTo configure the cert and client-cert I am using:\r\n\r\n poetry config certificates.private-pypi.cert /path/to/ca.crt\r\n poetry config certificates.private-pypi.client-cert /path/to/client.pem\r\n\r\n... from the custom CA & TLS [instructions](https://python-poetry.org/docs/repositories/#custom-certificate-authority-and-mutual-tls-authentication).\r\n\r\nAnd from pyproject.toml:\r\n\r\n [[tool.poetry.source]]\r\n name = \"private-pypi\"\r\n url = \"https://private-pypi/simple\"\r\n\r\nWhen doing a `poetry update` to get the latest packages, it fails to get packages from private repositories. \r\n\r\nIt fails with:\r\n\r\n Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify\r\n failed: self signed certificate in certificate chain (_ssl.c:1091)'))\r\n\r\n... I started to debug that error it looked like the cert and CA were simply not provided in the http request. Actually here's what I saw:\r\n\r\nA single successful request where the cert and CA were passed:\r\n\r\n GET https://private-pypi/simple/<package_name>/\r\n\r\nAnd then 5 failed retries that had the SSLCertVerificationError:\r\n\r\n GET https://private-pypi/packages/<package_name>/<package>-py3-none-any.whl#md5=blah\r\n\r\nAnd in those 5 requests it looked like the cert and CA were not provided. So in my CI build I am not able to install `<package>` which happens to be an upgrade. 
I believe this started to happen with 1.1.0 as I can force version 1.0.10 and then the CI build will succeed and the package will install successfully.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging\nimport time\nimport urllib.parse\n\nfrom typing import TYPE_CHECKING\nfrom typing import Any\n\nimport requests\nimport requests.auth\nimport requests.exceptions\n\nfrom poetry.exceptions import PoetryException\nfrom poetry.utils.password_manager import PasswordManager\n\n\nif TYPE_CHECKING:\n from cleo.io.io import IO\n\n from poetry.config.config import Config\n\n\nlogger = logging.getLogger()\n\n\nclass Authenticator:\n def __init__(self, config: Config, io: IO | None = None) -> None:\n self._config = config\n self._io = io\n self._session = None\n self._credentials = {}\n self._password_manager = PasswordManager(self._config)\n\n def _log(self, message: str, level: str = \"debug\") -> None:\n if self._io is not None:\n self._io.write_line(f\"<{level}>{message}</{level}>\")\n else:\n getattr(logger, level, logger.debug)(message)\n\n @property\n def session(self) -> requests.Session:\n if self._session is None:\n self._session = requests.Session()\n\n return self._session\n\n def __del__(self) -> None:\n if self._session is not None:\n self._session.close()\n\n def request(self, method: str, url: str, **kwargs: Any) -> requests.Response:\n request = requests.Request(method, url)\n username, password = self.get_credentials_for_url(url)\n\n if username is not None and password is not None:\n request = requests.auth.HTTPBasicAuth(username, password)(request)\n\n session = self.session\n prepared_request = session.prepare_request(request)\n\n proxies = kwargs.get(\"proxies\", {})\n stream = kwargs.get(\"stream\")\n verify = kwargs.get(\"verify\")\n cert = kwargs.get(\"cert\")\n\n settings = session.merge_environment_settings(\n prepared_request.url, proxies, stream, verify, cert\n )\n\n # Send the request.\n send_kwargs = {\n \"timeout\": kwargs.get(\"timeout\"),\n \"allow_redirects\": kwargs.get(\"allow_redirects\", True),\n }\n send_kwargs.update(settings)\n\n attempt = 0\n\n while True:\n is_last_attempt = attempt >= 5\n try:\n resp = session.send(prepared_request, **send_kwargs)\n except (requests.exceptions.ConnectionError, OSError) as e:\n if is_last_attempt:\n raise e\n else:\n if resp.status_code not in [502, 503, 504] or is_last_attempt:\n resp.raise_for_status()\n return resp\n\n if not is_last_attempt:\n attempt += 1\n delay = 0.5 * attempt\n self._log(f\"Retrying HTTP request in {delay} seconds.\", level=\"debug\")\n time.sleep(delay)\n continue\n\n # this should never really be hit under any sane circumstance\n raise PoetryException(\"Failed HTTP {} request\", method.upper())\n\n def get_credentials_for_url(self, url: str) -> tuple[str | None, str | None]:\n parsed_url = urllib.parse.urlsplit(url)\n\n netloc = parsed_url.netloc\n\n credentials = self._credentials.get(netloc, (None, None))\n\n if credentials == (None, None):\n if \"@\" not in netloc:\n credentials = self._get_credentials_for_netloc(netloc)\n else:\n # Split from the right because that's how urllib.parse.urlsplit()\n # behaves if more than one @ is present (which can be checked using\n # the password attribute of urlsplit()'s return value).\n auth, netloc = netloc.rsplit(\"@\", 1)\n # Split from the left because that's how urllib.parse.urlsplit()\n # behaves if more than one : is present (which again can be checked\n # using the password attribute of the return value)\n 
credentials = auth.split(\":\", 1) if \":\" in auth else (auth, None)\n credentials = tuple(\n None if x is None else urllib.parse.unquote(x) for x in credentials\n )\n\n if credentials[0] is not None or credentials[1] is not None:\n credentials = (credentials[0] or \"\", credentials[1] or \"\")\n\n self._credentials[netloc] = credentials\n\n return credentials[0], credentials[1]\n\n def get_pypi_token(self, name: str) -> str:\n return self._password_manager.get_pypi_token(name)\n\n def get_http_auth(self, name: str) -> dict[str, str] | None:\n return self._get_http_auth(name, None)\n\n def _get_http_auth(self, name: str, netloc: str | None) -> dict[str, str] | None:\n if name == \"pypi\":\n url = \"https://upload.pypi.org/legacy/\"\n else:\n url = self._config.get(f\"repositories.{name}.url\")\n if not url:\n return None\n\n parsed_url = urllib.parse.urlsplit(url)\n\n if netloc is None or netloc == parsed_url.netloc:\n auth = self._password_manager.get_http_auth(name)\n\n if auth is None or auth[\"password\"] is None:\n username = auth[\"username\"] if auth else None\n auth = self._get_credentials_for_netloc_from_keyring(\n url, parsed_url.netloc, username\n )\n\n return auth\n\n def _get_credentials_for_netloc(self, netloc: str) -> tuple[str | None, str | None]:\n for repository_name in self._config.get(\"repositories\", []):\n auth = self._get_http_auth(repository_name, netloc)\n\n if auth is None:\n continue\n\n return auth[\"username\"], auth[\"password\"]\n\n return None, None\n\n def _get_credentials_for_netloc_from_keyring(\n self, url: str, netloc: str, username: str | None\n ) -> dict[str, str] | None:\n import keyring\n\n cred = keyring.get_credential(url, username)\n if cred is not None:\n return {\n \"username\": cred.username,\n \"password\": cred.password,\n }\n\n cred = keyring.get_credential(netloc, username)\n if cred is not None:\n return {\n \"username\": cred.username,\n \"password\": cred.password,\n }\n\n if username:\n return {\n \"username\": username,\n \"password\": None,\n }\n\n return None\n", "path": "src/poetry/utils/authenticator.py"}]} | 3,046 | 816 |
gh_patches_debug_7247 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1299 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
core.hooksPath being set makes it not possible to install
I have a laptop which has some mandatory global settings for git, and I have not been able to enable pre-commit. I also tried doing
```
git config --global init.templateDir ~/.git-template
pre-commit init-templatedir ~/.git-template
```
But still get the message
```
[ERROR] Cowardly refusing to install hooks with `core.hooksPath` set.
hint: `git config --unset-all core.hooksPath`
```
Are there any workarounds for this?
</issue>
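A note that may help when reproducing this (it is not part of the original report): `git config --show-origin --get core.hooksPath` prints which configuration file supplies the value, and a plain `git config --unset-all core.hooksPath` only edits the repository-local config unless `--global` or `--system` is passed, which matters when the setting is mandated machine-wide.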
<code>
[start of pre_commit/commands/install_uninstall.py]
1 import itertools
2 import logging
3 import os.path
4 import shutil
5 import sys
6 from typing import Optional
7 from typing import Sequence
8 from typing import Tuple
9
10 from pre_commit import git
11 from pre_commit import output
12 from pre_commit.clientlib import load_config
13 from pre_commit.repository import all_hooks
14 from pre_commit.repository import install_hook_envs
15 from pre_commit.store import Store
16 from pre_commit.util import make_executable
17 from pre_commit.util import resource_text
18
19
20 logger = logging.getLogger(__name__)
21
22 # This is used to identify the hook file we install
23 PRIOR_HASHES = (
24 '4d9958c90bc262f47553e2c073f14cfe',
25 'd8ee923c46731b42cd95cc869add4062',
26 '49fd668cb42069aa1b6048464be5d395',
27 '79f09a650522a87b0da915d0d983b2de',
28 'e358c9dae00eac5d06b38dfdb1e33a8c',
29 )
30 CURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'
31 TEMPLATE_START = '# start templated\n'
32 TEMPLATE_END = '# end templated\n'
33
34
35 def _hook_paths(
36 hook_type: str,
37 git_dir: Optional[str] = None,
38 ) -> Tuple[str, str]:
39 git_dir = git_dir if git_dir is not None else git.get_git_dir()
40 pth = os.path.join(git_dir, 'hooks', hook_type)
41 return pth, f'{pth}.legacy'
42
43
44 def is_our_script(filename: str) -> bool:
45 if not os.path.exists(filename): # pragma: windows no cover (symlink)
46 return False
47 with open(filename) as f:
48 contents = f.read()
49 return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)
50
51
52 def shebang() -> str:
53 if sys.platform == 'win32':
54 py = 'python'
55 else:
56 # Homebrew/homebrew-core#35825: be more timid about appropriate `PATH`
57 path_choices = [p for p in os.defpath.split(os.pathsep) if p]
58 exe_choices = [
59 f'python{sys.version_info[0]}.{sys.version_info[1]}',
60 f'python{sys.version_info[0]}',
61 ]
62 for path, exe in itertools.product(path_choices, exe_choices):
63 if os.access(os.path.join(path, exe), os.X_OK):
64 py = exe
65 break
66 else:
67 py = 'python'
68 return f'#!/usr/bin/env {py}'
69
70
71 def _install_hook_script(
72 config_file: str,
73 hook_type: str,
74 overwrite: bool = False,
75 skip_on_missing_config: bool = False,
76 git_dir: Optional[str] = None,
77 ) -> None:
78 hook_path, legacy_path = _hook_paths(hook_type, git_dir=git_dir)
79
80 os.makedirs(os.path.dirname(hook_path), exist_ok=True)
81
82 # If we have an existing hook, move it to pre-commit.legacy
83 if os.path.lexists(hook_path) and not is_our_script(hook_path):
84 shutil.move(hook_path, legacy_path)
85
86 # If we specify overwrite, we simply delete the legacy file
87 if overwrite and os.path.exists(legacy_path):
88 os.remove(legacy_path)
89 elif os.path.exists(legacy_path):
90 output.write_line(
91 f'Running in migration mode with existing hooks at {legacy_path}\n'
92 f'Use -f to use only pre-commit.',
93 )
94
95 args = ['hook-impl', f'--config={config_file}', f'--hook-type={hook_type}']
96 if skip_on_missing_config:
97 args.append('--skip-on-missing-config')
98 params = {'INSTALL_PYTHON': sys.executable, 'ARGS': args}
99
100 with open(hook_path, 'w') as hook_file:
101 contents = resource_text('hook-tmpl')
102 before, rest = contents.split(TEMPLATE_START)
103 to_template, after = rest.split(TEMPLATE_END)
104
105 before = before.replace('#!/usr/bin/env python3', shebang())
106
107 hook_file.write(before + TEMPLATE_START)
108 for line in to_template.splitlines():
109 var = line.split()[0]
110 hook_file.write(f'{var} = {params[var]!r}\n')
111 hook_file.write(TEMPLATE_END + after)
112 make_executable(hook_path)
113
114 output.write_line(f'pre-commit installed at {hook_path}')
115
116
117 def install(
118 config_file: str,
119 store: Store,
120 hook_types: Sequence[str],
121 overwrite: bool = False,
122 hooks: bool = False,
123 skip_on_missing_config: bool = False,
124 git_dir: Optional[str] = None,
125 ) -> int:
126 if git.has_core_hookpaths_set():
127 logger.error(
128 'Cowardly refusing to install hooks with `core.hooksPath` set.\n'
129 'hint: `git config --unset-all core.hooksPath`',
130 )
131 return 1
132
133 for hook_type in hook_types:
134 _install_hook_script(
135 config_file, hook_type,
136 overwrite=overwrite,
137 skip_on_missing_config=skip_on_missing_config,
138 git_dir=git_dir,
139 )
140
141 if hooks:
142 install_hooks(config_file, store)
143
144 return 0
145
146
147 def install_hooks(config_file: str, store: Store) -> int:
148 install_hook_envs(all_hooks(load_config(config_file), store), store)
149 return 0
150
151
152 def _uninstall_hook_script(hook_type: str) -> None:
153 hook_path, legacy_path = _hook_paths(hook_type)
154
155 # If our file doesn't exist or it isn't ours, gtfo.
156 if not os.path.exists(hook_path) or not is_our_script(hook_path):
157 return
158
159 os.remove(hook_path)
160 output.write_line(f'{hook_type} uninstalled')
161
162 if os.path.exists(legacy_path):
163 os.rename(legacy_path, hook_path)
164 output.write_line(f'Restored previous hooks to {hook_path}')
165
166
167 def uninstall(hook_types: Sequence[str]) -> int:
168 for hook_type in hook_types:
169 _uninstall_hook_script(hook_type)
170 return 0
171
[end of pre_commit/commands/install_uninstall.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py
--- a/pre_commit/commands/install_uninstall.py
+++ b/pre_commit/commands/install_uninstall.py
@@ -123,7 +123,7 @@
skip_on_missing_config: bool = False,
git_dir: Optional[str] = None,
) -> int:
- if git.has_core_hookpaths_set():
+ if git_dir is None and git.has_core_hookpaths_set():
logger.error(
'Cowardly refusing to install hooks with `core.hooksPath` set.\n'
'hint: `git config --unset-all core.hooksPath`',
| {"golden_diff": "diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py\n--- a/pre_commit/commands/install_uninstall.py\n+++ b/pre_commit/commands/install_uninstall.py\n@@ -123,7 +123,7 @@\n skip_on_missing_config: bool = False,\n git_dir: Optional[str] = None,\n ) -> int:\n- if git.has_core_hookpaths_set():\n+ if git_dir is None and git.has_core_hookpaths_set():\n logger.error(\n 'Cowardly refusing to install hooks with `core.hooksPath` set.\\n'\n 'hint: `git config --unset-all core.hooksPath`',\n", "issue": "core.hooksPath being set makes it not possible to install\nI have a laptop which has some mandaotry global settings for git, and I have not been able to enable pre-commit. I also tried doing\r\n```\r\ngit config --global init.templateDir ~/.git-template\r\npre-commit init-templatedir ~/.git-template\r\n```\r\nBut still get the message\r\n```\r\n[ERROR] Cowardly refusing to install hooks with `core.hooksPath` set.\r\nhint: `git config --unset-all core.hooksPath`\r\n```\r\n\r\nIs there any work arounds for this?\n", "before_files": [{"content": "import itertools\nimport logging\nimport os.path\nimport shutil\nimport sys\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.repository import all_hooks\nfrom pre_commit.repository import install_hook_envs\nfrom pre_commit.store import Store\nfrom pre_commit.util import make_executable\nfrom pre_commit.util import resource_text\n\n\nlogger = logging.getLogger(__name__)\n\n# This is used to identify the hook file we install\nPRIOR_HASHES = (\n '4d9958c90bc262f47553e2c073f14cfe',\n 'd8ee923c46731b42cd95cc869add4062',\n '49fd668cb42069aa1b6048464be5d395',\n '79f09a650522a87b0da915d0d983b2de',\n 'e358c9dae00eac5d06b38dfdb1e33a8c',\n)\nCURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'\nTEMPLATE_START = '# start templated\\n'\nTEMPLATE_END = '# end templated\\n'\n\n\ndef _hook_paths(\n hook_type: str,\n git_dir: Optional[str] = None,\n) -> Tuple[str, str]:\n git_dir = git_dir if git_dir is not None else git.get_git_dir()\n pth = os.path.join(git_dir, 'hooks', hook_type)\n return pth, f'{pth}.legacy'\n\n\ndef is_our_script(filename: str) -> bool:\n if not os.path.exists(filename): # pragma: windows no cover (symlink)\n return False\n with open(filename) as f:\n contents = f.read()\n return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)\n\n\ndef shebang() -> str:\n if sys.platform == 'win32':\n py = 'python'\n else:\n # Homebrew/homebrew-core#35825: be more timid about appropriate `PATH`\n path_choices = [p for p in os.defpath.split(os.pathsep) if p]\n exe_choices = [\n f'python{sys.version_info[0]}.{sys.version_info[1]}',\n f'python{sys.version_info[0]}',\n ]\n for path, exe in itertools.product(path_choices, exe_choices):\n if os.access(os.path.join(path, exe), os.X_OK):\n py = exe\n break\n else:\n py = 'python'\n return f'#!/usr/bin/env {py}'\n\n\ndef _install_hook_script(\n config_file: str,\n hook_type: str,\n overwrite: bool = False,\n skip_on_missing_config: bool = False,\n git_dir: Optional[str] = None,\n) -> None:\n hook_path, legacy_path = _hook_paths(hook_type, git_dir=git_dir)\n\n os.makedirs(os.path.dirname(hook_path), exist_ok=True)\n\n # If we have an existing hook, move it to pre-commit.legacy\n if os.path.lexists(hook_path) and not is_our_script(hook_path):\n shutil.move(hook_path, legacy_path)\n\n # If we specify 
overwrite, we simply delete the legacy file\n if overwrite and os.path.exists(legacy_path):\n os.remove(legacy_path)\n elif os.path.exists(legacy_path):\n output.write_line(\n f'Running in migration mode with existing hooks at {legacy_path}\\n'\n f'Use -f to use only pre-commit.',\n )\n\n args = ['hook-impl', f'--config={config_file}', f'--hook-type={hook_type}']\n if skip_on_missing_config:\n args.append('--skip-on-missing-config')\n params = {'INSTALL_PYTHON': sys.executable, 'ARGS': args}\n\n with open(hook_path, 'w') as hook_file:\n contents = resource_text('hook-tmpl')\n before, rest = contents.split(TEMPLATE_START)\n to_template, after = rest.split(TEMPLATE_END)\n\n before = before.replace('#!/usr/bin/env python3', shebang())\n\n hook_file.write(before + TEMPLATE_START)\n for line in to_template.splitlines():\n var = line.split()[0]\n hook_file.write(f'{var} = {params[var]!r}\\n')\n hook_file.write(TEMPLATE_END + after)\n make_executable(hook_path)\n\n output.write_line(f'pre-commit installed at {hook_path}')\n\n\ndef install(\n config_file: str,\n store: Store,\n hook_types: Sequence[str],\n overwrite: bool = False,\n hooks: bool = False,\n skip_on_missing_config: bool = False,\n git_dir: Optional[str] = None,\n) -> int:\n if git.has_core_hookpaths_set():\n logger.error(\n 'Cowardly refusing to install hooks with `core.hooksPath` set.\\n'\n 'hint: `git config --unset-all core.hooksPath`',\n )\n return 1\n\n for hook_type in hook_types:\n _install_hook_script(\n config_file, hook_type,\n overwrite=overwrite,\n skip_on_missing_config=skip_on_missing_config,\n git_dir=git_dir,\n )\n\n if hooks:\n install_hooks(config_file, store)\n\n return 0\n\n\ndef install_hooks(config_file: str, store: Store) -> int:\n install_hook_envs(all_hooks(load_config(config_file), store), store)\n return 0\n\n\ndef _uninstall_hook_script(hook_type: str) -> None:\n hook_path, legacy_path = _hook_paths(hook_type)\n\n # If our file doesn't exist or it isn't ours, gtfo.\n if not os.path.exists(hook_path) or not is_our_script(hook_path):\n return\n\n os.remove(hook_path)\n output.write_line(f'{hook_type} uninstalled')\n\n if os.path.exists(legacy_path):\n os.rename(legacy_path, hook_path)\n output.write_line(f'Restored previous hooks to {hook_path}')\n\n\ndef uninstall(hook_types: Sequence[str]) -> int:\n for hook_type in hook_types:\n _uninstall_hook_script(hook_type)\n return 0\n", "path": "pre_commit/commands/install_uninstall.py"}]} | 2,511 | 147 |
gh_patches_debug_16746 | rasdani/github-patches | git_diff | scikit-image__scikit-image-1927 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DOC: Reduce size image in plot_inpaint
In reference to #1920
- Reduce the size of the image in doc/examples/filters/plot_inpaint.py so that the result of the algorithm is shown more clearly.
</issue>
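For illustration, the kind of change being asked for amounts to cropping the example image with plain NumPy slicing; the exact crop region below is a choice, not something prescribed by the issue.

```
# Illustrative only: crop the 512x512 example image so the inpainted regions
# occupy a larger fraction of the figure.
from skimage import data

image_orig = data.astronaut()[0:200, 0:200]
```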
<code>
[start of doc/examples/filters/plot_inpaint.py]
1 """
2 ===========
3 Inpainting
4 ===========
5 Inpainting [1]_ is the process of reconstructing lost or deteriorated
6 parts of images and videos.
7
8 The reconstruction is supposed to be performed in fully automatic way by
9 exploiting the information presented in non-damaged regions.
10
11 In this example, we show how the masked pixels get inpainted by
12 inpainting algorithm based on 'biharmonic equation'-assumption [2]_ [3]_.
13
14 .. [1] Wikipedia. Inpainting
15 https://en.wikipedia.org/wiki/Inpainting
16 .. [2] Wikipedia. Biharmonic equation
17 https://en.wikipedia.org/wiki/Biharmonic_equation
18 .. [3] N.S.Hoang, S.B.Damelin, "On surface completion and image
19 inpainting by biharmonic functions: numerical aspects",
20 http://www.ima.umn.edu/~damelin/biharmonic
21 """
22
23 import numpy as np
24 import matplotlib.pyplot as plt
25
26 from skimage import data, color
27 from skimage.restoration import inpaint
28
29 image_orig = data.astronaut()
30
31 # Create mask with three defect regions: left, middle, right respectively
32 mask = np.zeros(image_orig.shape[:-1])
33 mask[20:60, 0:20] = 1
34 mask[200:300, 150:170] = 1
35 mask[50:100, 400:430] = 1
36
37 # Defect image over the same region in each color channel
38 image_defect = image_orig.copy()
39 for layer in range(image_defect.shape[-1]):
40 image_defect[np.where(mask)] = 0
41
42 image_result = inpaint.inpaint_biharmonic(image_defect, mask, multichannel=True)
43
44 fig, axes = plt.subplots(ncols=2, nrows=2)
45 ax0, ax1, ax2, ax3 = axes.ravel()
46
47 ax0.set_title('Original image')
48 ax0.imshow(image_orig)
49 ax0.axis('off')
50
51 ax1.set_title('Mask')
52 ax1.imshow(mask, cmap=plt.cm.gray)
53 ax1.axis('off')
54
55 ax2.set_title('Defected image')
56 ax2.imshow(image_defect)
57 ax2.axis('off')
58
59 ax3.set_title('Inpainted image')
60 ax3.imshow(image_result)
61 ax3.axis('off')
62
63 plt.tight_layout()
64 plt.show()
65
[end of doc/examples/filters/plot_inpaint.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/doc/examples/filters/plot_inpaint.py b/doc/examples/filters/plot_inpaint.py
--- a/doc/examples/filters/plot_inpaint.py
+++ b/doc/examples/filters/plot_inpaint.py
@@ -26,13 +26,13 @@
from skimage import data, color
from skimage.restoration import inpaint
-image_orig = data.astronaut()
+image_orig = data.astronaut()[0:200, 0:200]
# Create mask with three defect regions: left, middle, right respectively
mask = np.zeros(image_orig.shape[:-1])
mask[20:60, 0:20] = 1
-mask[200:300, 150:170] = 1
-mask[50:100, 400:430] = 1
+mask[160:180, 70:155] = 1
+mask[30:60, 170:195] = 1
# Defect image over the same region in each color channel
image_defect = image_orig.copy()
@@ -60,5 +60,5 @@
ax3.imshow(image_result)
ax3.axis('off')
-plt.tight_layout()
+fig.tight_layout()
plt.show()
| {"golden_diff": "diff --git a/doc/examples/filters/plot_inpaint.py b/doc/examples/filters/plot_inpaint.py\n--- a/doc/examples/filters/plot_inpaint.py\n+++ b/doc/examples/filters/plot_inpaint.py\n@@ -26,13 +26,13 @@\n from skimage import data, color\n from skimage.restoration import inpaint\n \n-image_orig = data.astronaut()\n+image_orig = data.astronaut()[0:200, 0:200]\n \n # Create mask with three defect regions: left, middle, right respectively\n mask = np.zeros(image_orig.shape[:-1])\n mask[20:60, 0:20] = 1\n-mask[200:300, 150:170] = 1\n-mask[50:100, 400:430] = 1\n+mask[160:180, 70:155] = 1\n+mask[30:60, 170:195] = 1\n \n # Defect image over the same region in each color channel\n image_defect = image_orig.copy()\n@@ -60,5 +60,5 @@\n ax3.imshow(image_result)\n ax3.axis('off')\n \n-plt.tight_layout()\n+fig.tight_layout()\n plt.show()\n", "issue": "DOC: Reduce size image in plot_inpaint\nIn reference to #1920 \n- Reduce the size of the image in doc/examples/filters/plot_inpaint.py to show more clearly the result of the algorithm.\n\n", "before_files": [{"content": "\"\"\"\n===========\nInpainting\n===========\nInpainting [1]_ is the process of reconstructing lost or deteriorated\nparts of images and videos.\n\nThe reconstruction is supposed to be performed in fully automatic way by\nexploiting the information presented in non-damaged regions.\n\nIn this example, we show how the masked pixels get inpainted by\ninpainting algorithm based on 'biharmonic equation'-assumption [2]_ [3]_.\n\n.. [1] Wikipedia. Inpainting\n https://en.wikipedia.org/wiki/Inpainting\n.. [2] Wikipedia. Biharmonic equation\n https://en.wikipedia.org/wiki/Biharmonic_equation\n.. [3] N.S.Hoang, S.B.Damelin, \"On surface completion and image\n inpainting by biharmonic functions: numerical aspects\",\n http://www.ima.umn.edu/~damelin/biharmonic\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom skimage import data, color\nfrom skimage.restoration import inpaint\n\nimage_orig = data.astronaut()\n\n# Create mask with three defect regions: left, middle, right respectively\nmask = np.zeros(image_orig.shape[:-1])\nmask[20:60, 0:20] = 1\nmask[200:300, 150:170] = 1\nmask[50:100, 400:430] = 1\n\n# Defect image over the same region in each color channel\nimage_defect = image_orig.copy()\nfor layer in range(image_defect.shape[-1]):\n image_defect[np.where(mask)] = 0\n\nimage_result = inpaint.inpaint_biharmonic(image_defect, mask, multichannel=True)\n\nfig, axes = plt.subplots(ncols=2, nrows=2)\nax0, ax1, ax2, ax3 = axes.ravel()\n\nax0.set_title('Original image')\nax0.imshow(image_orig)\nax0.axis('off')\n\nax1.set_title('Mask')\nax1.imshow(mask, cmap=plt.cm.gray)\nax1.axis('off')\n\nax2.set_title('Defected image')\nax2.imshow(image_defect)\nax2.axis('off')\n\nax3.set_title('Inpainted image')\nax3.imshow(image_result)\nax3.axis('off')\n\nplt.tight_layout()\nplt.show()\n", "path": "doc/examples/filters/plot_inpaint.py"}]} | 1,230 | 300 |
gh_patches_debug_32032 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-376 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[metrics] AUROC Metric can't handle 0 observations of a class with multiclass classifier
I'm attempting to calculate AUROC for a multiclass problem where some classes are very rare and occasionally never seen at all, and I'm getting the following error: `raise ValueError("No positive samples in targets, true positive value should be meaningless")`
In the case of 0 observations, I feel that `average='weighted'` should work, since the contribution to the final AUROC would be 0 regardless. One can also think of other scenarios with a very large number of classes, some of which will happen not to be seen in a given dataset.
_Originally posted by @BeyondTheProof in https://github.com/PyTorchLightning/pytorch-lightning/issues/2210#issuecomment-872440776_
</issue>
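To make the reasoning in the report concrete, the sketch below hand-rolls a support-weighted average in which classes with zero positive samples receive zero weight, so an absent class cannot contribute to (or break) the final score. The per-class scores are made up, and this is not torchmetrics code nor necessarily how the library resolves the issue.

```
# Hedged sketch only: support-weighted averaging of per-class AUROC values,
# skipping classes that never appear in the targets.
import torch

per_class_auroc = torch.tensor([0.91, 0.78, 0.0])  # score for class 2 is undefined
support = torch.tensor([120, 35, 0])                # class 2 has no positive samples

weights = support.float() / support.sum()
mask = support > 0
weighted_auroc = (per_class_auroc[mask] * weights[mask]).sum()
print(weighted_auroc)  # only classes that actually occur contribute
```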
<code>
[start of torchmetrics/functional/classification/auroc.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Optional, Sequence, Tuple
15
16 import torch
17 from torch import Tensor, tensor
18
19 from torchmetrics.functional.classification.auc import _auc_compute_without_check
20 from torchmetrics.functional.classification.roc import roc
21 from torchmetrics.utilities.checks import _input_format_classification
22 from torchmetrics.utilities.enums import AverageMethod, DataType
23 from torchmetrics.utilities.imports import _TORCH_LOWER_1_6
24
25
26 def _auroc_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor, DataType]:
27 # use _input_format_classification for validating the input and get the mode of data
28 _, _, mode = _input_format_classification(preds, target)
29
30 if mode == 'multi class multi dim':
31 n_classes = preds.shape[1]
32 preds = preds.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1)
33 target = target.flatten()
34 if mode == 'multi-label' and preds.ndim > 2:
35 n_classes = preds.shape[1]
36 preds = preds.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1)
37 target = target.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1)
38
39 return preds, target, mode
40
41
42 def _auroc_compute(
43 preds: Tensor,
44 target: Tensor,
45 mode: DataType,
46 num_classes: Optional[int] = None,
47 pos_label: Optional[int] = None,
48 average: Optional[str] = 'macro',
49 max_fpr: Optional[float] = None,
50 sample_weights: Optional[Sequence] = None,
51 ) -> Tensor:
52 # binary mode override num_classes
53 if mode == DataType.BINARY:
54 num_classes = 1
55
56 # check max_fpr parameter
57 if max_fpr is not None:
58 if not isinstance(max_fpr, float) and 0 < max_fpr <= 1:
59 raise ValueError(f"`max_fpr` should be a float in range (0, 1], got: {max_fpr}")
60
61 if _TORCH_LOWER_1_6:
62 raise RuntimeError(
63 "`max_fpr` argument requires `torch.bucketize` which"
64 " is not available below PyTorch version 1.6"
65 )
66
67 # max_fpr parameter is only support for binary
68 if mode != DataType.BINARY:
69 raise ValueError(
70 f"Partial AUC computation not available in"
71 f" multilabel/multiclass setting, 'max_fpr' must be"
72 f" set to `None`, received `{max_fpr}`."
73 )
74
75 # calculate fpr, tpr
76 if mode == DataType.MULTILABEL:
77 if average == AverageMethod.MICRO:
78 fpr, tpr, _ = roc(preds.flatten(), target.flatten(), 1, pos_label, sample_weights)
79 elif num_classes:
80 # for multilabel we iteratively evaluate roc in a binary fashion
81 output = [
82 roc(preds[:, i], target[:, i], num_classes=1, pos_label=1, sample_weights=sample_weights)
83 for i in range(num_classes)
84 ]
85 fpr = [o[0] for o in output]
86 tpr = [o[1] for o in output]
87 else:
88 raise ValueError('Detected input to be `multilabel` but you did not provide `num_classes` argument')
89 else:
90 if mode != DataType.BINARY and num_classes is None:
91 raise ValueError('Detected input to `multiclass` but you did not provide `num_classes` argument')
92 fpr, tpr, _ = roc(preds, target, num_classes, pos_label, sample_weights)
93
94 # calculate standard roc auc score
95 if max_fpr is None or max_fpr == 1:
96 if mode == DataType.MULTILABEL and average == AverageMethod.MICRO:
97 pass
98 elif num_classes != 1:
99 # calculate auc scores per class
100 auc_scores = [_auc_compute_without_check(x, y, 1.0) for x, y in zip(fpr, tpr)]
101
102 # calculate average
103 if average == AverageMethod.NONE:
104 return tensor(auc_scores)
105 if average == AverageMethod.MACRO:
106 return torch.mean(torch.stack(auc_scores))
107 if average == AverageMethod.WEIGHTED:
108 if mode == DataType.MULTILABEL:
109 support = torch.sum(target, dim=0)
110 else:
111 support = torch.bincount(target.flatten(), minlength=num_classes)
112 return torch.sum(torch.stack(auc_scores) * support / support.sum())
113
114 allowed_average = (AverageMethod.NONE.value, AverageMethod.MACRO.value, AverageMethod.WEIGHTED.value)
115 raise ValueError(
116 f"Argument `average` expected to be one of the following:"
117 f" {allowed_average} but got {average}"
118 )
119
120 return _auc_compute_without_check(fpr, tpr, 1.0)
121
122 _device = fpr.device if isinstance(fpr, Tensor) else fpr[0].device
123 max_area: Tensor = tensor(max_fpr, device=_device)
124 # Add a single point at max_fpr and interpolate its tpr value
125 stop = torch.bucketize(max_area, fpr, out_int32=True, right=True)
126 weight = (max_area - fpr[stop - 1]) / (fpr[stop] - fpr[stop - 1])
127 interp_tpr: Tensor = torch.lerp(tpr[stop - 1], tpr[stop], weight)
128 tpr = torch.cat([tpr[:stop], interp_tpr.view(1)])
129 fpr = torch.cat([fpr[:stop], max_area.view(1)])
130
131 # Compute partial AUC
132 partial_auc = _auc_compute_without_check(fpr, tpr, 1.0)
133
134 # McClish correction: standardize result to be 0.5 if non-discriminant and 1 if maximal
135 min_area: Tensor = 0.5 * max_area**2
136 return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))
137
138
139 def auroc(
140 preds: Tensor,
141 target: Tensor,
142 num_classes: Optional[int] = None,
143 pos_label: Optional[int] = None,
144 average: Optional[str] = 'macro',
145 max_fpr: Optional[float] = None,
146 sample_weights: Optional[Sequence] = None,
147 ) -> Tensor:
148 """ Compute `Area Under the Receiver Operating Characteristic Curve (ROC AUC)
149 <https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Further_interpretations>`_
150
151 Args:
152 preds: predictions from model (logits or probabilities)
153 target: Ground truth labels
154 num_classes: integer with number of classes for multi-label and multiclass problems.
155 Should be set to ``None`` for binary problems
156 pos_label: integer determining the positive class. Default is ``None``
157 which for binary problem is translate to 1. For multiclass problems
158 this argument should not be set as we iteratively change it in the
159 range [0,num_classes-1]
160 average:
161 - ``'micro'`` computes metric globally. Only works for multilabel problems
162 - ``'macro'`` computes metric for each class and uniformly averages them
163 - ``'weighted'`` computes metric for each class and does a weighted-average,
164 where each class is weighted by their support (accounts for class imbalance)
165 - ``None`` computes and returns the metric per class
166 max_fpr:
167 If not ``None``, calculates standardized partial AUC over the
168 range [0, max_fpr]. Should be a float between 0 and 1.
169 sample_weights: sample weights for each data point
170
171 Raises:
172 ValueError:
173 If ``max_fpr`` is not a ``float`` in the range ``(0, 1]``.
174 RuntimeError:
175 If ``PyTorch version`` is ``below 1.6`` since max_fpr requires `torch.bucketize`
176 which is not available below 1.6.
177 ValueError:
178 If ``max_fpr`` is not set to ``None`` and the mode is ``not binary``
179 since partial AUC computation is not available in multilabel/multiclass.
180 ValueError:
181 If ``average`` is none of ``None``, ``"macro"`` or ``"weighted"``.
182
183 Example (binary case):
184 >>> from torchmetrics.functional import auroc
185 >>> preds = torch.tensor([0.13, 0.26, 0.08, 0.19, 0.34])
186 >>> target = torch.tensor([0, 0, 1, 1, 1])
187 >>> auroc(preds, target, pos_label=1)
188 tensor(0.5000)
189
190 Example (multiclass case):
191 >>> preds = torch.tensor([[0.90, 0.05, 0.05],
192 ... [0.05, 0.90, 0.05],
193 ... [0.05, 0.05, 0.90],
194 ... [0.85, 0.05, 0.10],
195 ... [0.10, 0.10, 0.80]])
196 >>> target = torch.tensor([0, 1, 1, 2, 2])
197 >>> auroc(preds, target, num_classes=3)
198 tensor(0.7778)
199 """
200 preds, target, mode = _auroc_update(preds, target)
201 return _auroc_compute(preds, target, mode, num_classes, pos_label, average, max_fpr, sample_weights)
202
[end of torchmetrics/functional/classification/auroc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchmetrics/functional/classification/auroc.py b/torchmetrics/functional/classification/auroc.py
--- a/torchmetrics/functional/classification/auroc.py
+++ b/torchmetrics/functional/classification/auroc.py
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import warnings
from typing import Optional, Sequence, Tuple
import torch
@@ -87,8 +88,23 @@
else:
raise ValueError('Detected input to be `multilabel` but you did not provide `num_classes` argument')
else:
- if mode != DataType.BINARY and num_classes is None:
- raise ValueError('Detected input to `multiclass` but you did not provide `num_classes` argument')
+ if mode != DataType.BINARY:
+ if num_classes is None:
+ raise ValueError("Detected input to `multiclass` but you did not provide `num_classes` argument")
+ if average == AverageMethod.WEIGHTED and len(torch.unique(target)) < num_classes:
+ # If one or more classes has 0 observations, we should exclude them, as its weight will be 0
+ target_bool_mat = torch.zeros((len(target), num_classes), dtype=bool)
+ target_bool_mat[torch.arange(len(target)), target.long()] = 1
+ class_observed = target_bool_mat.sum(axis=0) > 0
+ for c in range(num_classes):
+ if not class_observed[c]:
+ warnings.warn(f'Class {c} had 0 observations, omitted from AUROC calculation', UserWarning)
+ preds = preds[:, class_observed]
+ target = target_bool_mat[:, class_observed]
+ target = torch.where(target)[1]
+ num_classes = class_observed.sum()
+ if num_classes == 1:
+ raise ValueError('Found 1 non-empty class in `multiclass` AUROC calculation')
fpr, tpr, _ = roc(preds, target, num_classes, pos_label, sample_weights)
# calculate standard roc auc score
| {"golden_diff": "diff --git a/torchmetrics/functional/classification/auroc.py b/torchmetrics/functional/classification/auroc.py\n--- a/torchmetrics/functional/classification/auroc.py\n+++ b/torchmetrics/functional/classification/auroc.py\n@@ -11,6 +11,7 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n+import warnings\n from typing import Optional, Sequence, Tuple\n \n import torch\n@@ -87,8 +88,23 @@\n else:\n raise ValueError('Detected input to be `multilabel` but you did not provide `num_classes` argument')\n else:\n- if mode != DataType.BINARY and num_classes is None:\n- raise ValueError('Detected input to `multiclass` but you did not provide `num_classes` argument')\n+ if mode != DataType.BINARY:\n+ if num_classes is None:\n+ raise ValueError(\"Detected input to `multiclass` but you did not provide `num_classes` argument\")\n+ if average == AverageMethod.WEIGHTED and len(torch.unique(target)) < num_classes:\n+ # If one or more classes has 0 observations, we should exclude them, as its weight will be 0\n+ target_bool_mat = torch.zeros((len(target), num_classes), dtype=bool)\n+ target_bool_mat[torch.arange(len(target)), target.long()] = 1\n+ class_observed = target_bool_mat.sum(axis=0) > 0\n+ for c in range(num_classes):\n+ if not class_observed[c]:\n+ warnings.warn(f'Class {c} had 0 observations, omitted from AUROC calculation', UserWarning)\n+ preds = preds[:, class_observed]\n+ target = target_bool_mat[:, class_observed]\n+ target = torch.where(target)[1]\n+ num_classes = class_observed.sum()\n+ if num_classes == 1:\n+ raise ValueError('Found 1 non-empty class in `multiclass` AUROC calculation')\n fpr, tpr, _ = roc(preds, target, num_classes, pos_label, sample_weights)\n \n # calculate standard roc auc score\n", "issue": "[metrics] AUROC Metric can't handle 0 observations of a class with multiclass classifier\nI'm attempting to calculate AUROC for a multiclass problem where some classes are very rare, occasionally never seen, and I'm getting the following error: `raise ValueError(\"No positive samples in targets, true positive value should be meaningless\")`\r\n\r\nIn the case of 0 observations, I feel the `average='weighted'` should work, since the contribution to the final AUROC should be 0 regardless. 
One can think of other scenarios where there are a very high number of classes, some of which will happen to not be seen in some dataset.\r\n\r\n_Originally posted by @BeyondTheProof in https://github.com/PyTorchLightning/pytorch-lightning/issues/2210#issuecomment-872440776_\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional, Sequence, Tuple\n\nimport torch\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.functional.classification.auc import _auc_compute_without_check\nfrom torchmetrics.functional.classification.roc import roc\nfrom torchmetrics.utilities.checks import _input_format_classification\nfrom torchmetrics.utilities.enums import AverageMethod, DataType\nfrom torchmetrics.utilities.imports import _TORCH_LOWER_1_6\n\n\ndef _auroc_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor, DataType]:\n # use _input_format_classification for validating the input and get the mode of data\n _, _, mode = _input_format_classification(preds, target)\n\n if mode == 'multi class multi dim':\n n_classes = preds.shape[1]\n preds = preds.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1)\n target = target.flatten()\n if mode == 'multi-label' and preds.ndim > 2:\n n_classes = preds.shape[1]\n preds = preds.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1)\n target = target.transpose(0, 1).reshape(n_classes, -1).transpose(0, 1)\n\n return preds, target, mode\n\n\ndef _auroc_compute(\n preds: Tensor,\n target: Tensor,\n mode: DataType,\n num_classes: Optional[int] = None,\n pos_label: Optional[int] = None,\n average: Optional[str] = 'macro',\n max_fpr: Optional[float] = None,\n sample_weights: Optional[Sequence] = None,\n) -> Tensor:\n # binary mode override num_classes\n if mode == DataType.BINARY:\n num_classes = 1\n\n # check max_fpr parameter\n if max_fpr is not None:\n if not isinstance(max_fpr, float) and 0 < max_fpr <= 1:\n raise ValueError(f\"`max_fpr` should be a float in range (0, 1], got: {max_fpr}\")\n\n if _TORCH_LOWER_1_6:\n raise RuntimeError(\n \"`max_fpr` argument requires `torch.bucketize` which\"\n \" is not available below PyTorch version 1.6\"\n )\n\n # max_fpr parameter is only support for binary\n if mode != DataType.BINARY:\n raise ValueError(\n f\"Partial AUC computation not available in\"\n f\" multilabel/multiclass setting, 'max_fpr' must be\"\n f\" set to `None`, received `{max_fpr}`.\"\n )\n\n # calculate fpr, tpr\n if mode == DataType.MULTILABEL:\n if average == AverageMethod.MICRO:\n fpr, tpr, _ = roc(preds.flatten(), target.flatten(), 1, pos_label, sample_weights)\n elif num_classes:\n # for multilabel we iteratively evaluate roc in a binary fashion\n output = [\n roc(preds[:, i], target[:, i], num_classes=1, pos_label=1, sample_weights=sample_weights)\n for i in range(num_classes)\n ]\n fpr = [o[0] for o in output]\n tpr = [o[1] for o in output]\n else:\n raise ValueError('Detected input to be `multilabel` but you did not provide `num_classes` 
argument')\n else:\n if mode != DataType.BINARY and num_classes is None:\n raise ValueError('Detected input to `multiclass` but you did not provide `num_classes` argument')\n fpr, tpr, _ = roc(preds, target, num_classes, pos_label, sample_weights)\n\n # calculate standard roc auc score\n if max_fpr is None or max_fpr == 1:\n if mode == DataType.MULTILABEL and average == AverageMethod.MICRO:\n pass\n elif num_classes != 1:\n # calculate auc scores per class\n auc_scores = [_auc_compute_without_check(x, y, 1.0) for x, y in zip(fpr, tpr)]\n\n # calculate average\n if average == AverageMethod.NONE:\n return tensor(auc_scores)\n if average == AverageMethod.MACRO:\n return torch.mean(torch.stack(auc_scores))\n if average == AverageMethod.WEIGHTED:\n if mode == DataType.MULTILABEL:\n support = torch.sum(target, dim=0)\n else:\n support = torch.bincount(target.flatten(), minlength=num_classes)\n return torch.sum(torch.stack(auc_scores) * support / support.sum())\n\n allowed_average = (AverageMethod.NONE.value, AverageMethod.MACRO.value, AverageMethod.WEIGHTED.value)\n raise ValueError(\n f\"Argument `average` expected to be one of the following:\"\n f\" {allowed_average} but got {average}\"\n )\n\n return _auc_compute_without_check(fpr, tpr, 1.0)\n\n _device = fpr.device if isinstance(fpr, Tensor) else fpr[0].device\n max_area: Tensor = tensor(max_fpr, device=_device)\n # Add a single point at max_fpr and interpolate its tpr value\n stop = torch.bucketize(max_area, fpr, out_int32=True, right=True)\n weight = (max_area - fpr[stop - 1]) / (fpr[stop] - fpr[stop - 1])\n interp_tpr: Tensor = torch.lerp(tpr[stop - 1], tpr[stop], weight)\n tpr = torch.cat([tpr[:stop], interp_tpr.view(1)])\n fpr = torch.cat([fpr[:stop], max_area.view(1)])\n\n # Compute partial AUC\n partial_auc = _auc_compute_without_check(fpr, tpr, 1.0)\n\n # McClish correction: standardize result to be 0.5 if non-discriminant and 1 if maximal\n min_area: Tensor = 0.5 * max_area**2\n return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))\n\n\ndef auroc(\n preds: Tensor,\n target: Tensor,\n num_classes: Optional[int] = None,\n pos_label: Optional[int] = None,\n average: Optional[str] = 'macro',\n max_fpr: Optional[float] = None,\n sample_weights: Optional[Sequence] = None,\n) -> Tensor:\n \"\"\" Compute `Area Under the Receiver Operating Characteristic Curve (ROC AUC)\n <https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Further_interpretations>`_\n\n Args:\n preds: predictions from model (logits or probabilities)\n target: Ground truth labels\n num_classes: integer with number of classes for multi-label and multiclass problems.\n Should be set to ``None`` for binary problems\n pos_label: integer determining the positive class. Default is ``None``\n which for binary problem is translate to 1. For multiclass problems\n this argument should not be set as we iteratively change it in the\n range [0,num_classes-1]\n average:\n - ``'micro'`` computes metric globally. Only works for multilabel problems\n - ``'macro'`` computes metric for each class and uniformly averages them\n - ``'weighted'`` computes metric for each class and does a weighted-average,\n where each class is weighted by their support (accounts for class imbalance)\n - ``None`` computes and returns the metric per class\n max_fpr:\n If not ``None``, calculates standardized partial AUC over the\n range [0, max_fpr]. 
Should be a float between 0 and 1.\n sample_weights: sample weights for each data point\n\n Raises:\n ValueError:\n If ``max_fpr`` is not a ``float`` in the range ``(0, 1]``.\n RuntimeError:\n If ``PyTorch version`` is ``below 1.6`` since max_fpr requires `torch.bucketize`\n which is not available below 1.6.\n ValueError:\n If ``max_fpr`` is not set to ``None`` and the mode is ``not binary``\n since partial AUC computation is not available in multilabel/multiclass.\n ValueError:\n If ``average`` is none of ``None``, ``\"macro\"`` or ``\"weighted\"``.\n\n Example (binary case):\n >>> from torchmetrics.functional import auroc\n >>> preds = torch.tensor([0.13, 0.26, 0.08, 0.19, 0.34])\n >>> target = torch.tensor([0, 0, 1, 1, 1])\n >>> auroc(preds, target, pos_label=1)\n tensor(0.5000)\n\n Example (multiclass case):\n >>> preds = torch.tensor([[0.90, 0.05, 0.05],\n ... [0.05, 0.90, 0.05],\n ... [0.05, 0.05, 0.90],\n ... [0.85, 0.05, 0.10],\n ... [0.10, 0.10, 0.80]])\n >>> target = torch.tensor([0, 1, 1, 2, 2])\n >>> auroc(preds, target, num_classes=3)\n tensor(0.7778)\n \"\"\"\n preds, target, mode = _auroc_update(preds, target)\n return _auroc_compute(preds, target, mode, num_classes, pos_label, average, max_fpr, sample_weights)\n", "path": "torchmetrics/functional/classification/auroc.py"}]} | 3,492 | 486 |
gh_patches_debug_57638 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-1447 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error when running DrQA PyTorch 1.0.0
When running the basic example on SQuAD:
```
python examples/train_model.py -m drqa -t squad -bs 32
```
it throws this:
```
[ training... ]
/content/DuReader/data/ParlAI/parlai/agents/drqa/layers.py:177: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.
alpha_flat = F.softmax(scores.view(-1, y.size(1)))
/content/DuReader/data/ParlAI/parlai/agents/drqa/layers.py:237: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.
alpha = F.softmax(scores)
/content/DuReader/data/ParlAI/parlai/agents/drqa/layers.py:210: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.
alpha = F.log_softmax(xWy)
Traceback (most recent call last):
File "examples/train_model.py", line 16, in <module>
TrainLoop(opt).train()
File "/content/DuReader/data/ParlAI/parlai/scripts/train_model.py", line 500, in train
world.parley()
File "/content/DuReader/data/ParlAI/parlai/core/worlds.py", line 641, in parley
batch_act = self.batch_act(agent_idx, batch_observations[agent_idx])
File "/content/DuReader/data/ParlAI/parlai/core/worlds.py", line 614, in batch_act
batch_actions = a.batch_act(batch_observation)
File "/content/DuReader/data/ParlAI/parlai/agents/drqa/drqa.py", line 227, in batch_act
self.model.update(batch)
File "/content/DuReader/data/ParlAI/parlai/agents/drqa/model.py", line 102, in update
self.train_loss.update(loss.data[0], ex[0].size(0))
IndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number
```
I think this is related to the new version of PyTorch and the fix should be fairly trivial. Anyone working on fixing it soon? If not I could probably do a PR.
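For reference, a minimal sketch of the underlying behaviour change (nothing DrQA-specific; just how 0-dim loss tensors behave on PyTorch 1.0):
```python
import torch
import torch.nn.functional as F

loss = F.nll_loss(F.log_softmax(torch.randn(4, 3), dim=1),
                  torch.tensor([0, 1, 2, 0]))

# On PyTorch 1.0 the loss is a 0-dim tensor, so indexing it fails:
# loss.data[0]      # IndexError: invalid index of a 0-dim tensor
value = loss.item()  # the portable way to get the Python number
```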
</issue>
<code>
[start of parlai/agents/drqa/model.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6 import torch
7 import torch.optim as optim
8 import torch.nn.functional as F
9 import numpy as np
10 import logging
11
12 from torch.autograd import Variable
13 from .utils import load_embeddings, AverageMeter
14 from .rnn_reader import RnnDocReader
15
16 logger = logging.getLogger('DrQA')
17
18
19 class DocReaderModel(object):
20 """High level model that handles intializing the underlying network
21 architecture, saving, updating examples, and predicting examples.
22 """
23
24 def __init__(self, opt, word_dict, feature_dict, state_dict=None):
25 # Book-keeping.
26 self.opt = opt
27 self.word_dict = word_dict
28 self.feature_dict = feature_dict
29 self.updates = 0
30 self.train_loss = AverageMeter()
31
32 # Building network.
33 self.network = RnnDocReader(opt)
34 if state_dict:
35 new_state = set(self.network.state_dict().keys())
36 for k in list(state_dict['network'].keys()):
37 if k not in new_state:
38 del state_dict['network'][k]
39 self.network.load_state_dict(state_dict['network'])
40
41 # Building optimizer.
42 parameters = [p for p in self.network.parameters() if p.requires_grad]
43 if opt['optimizer'] == 'sgd':
44 self.optimizer = optim.SGD(parameters, opt['learning_rate'],
45 momentum=opt['momentum'],
46 weight_decay=opt['weight_decay'])
47 elif opt['optimizer'] == 'adamax':
48 self.optimizer = optim.Adamax(parameters,
49 weight_decay=opt['weight_decay'])
50 else:
51 raise RuntimeError('Unsupported optimizer: %s' % opt['optimizer'])
52
53 def set_embeddings(self):
54 # Read word embeddings.
55 if not self.opt.get('embedding_file'):
56 logger.warning('[ WARNING: No embeddings provided. '
57 'Keeping random initialization. ]')
58 return
59 logger.info('[ Loading pre-trained embeddings ]')
60 embeddings = load_embeddings(self.opt, self.word_dict)
61 logger.info('[ Num embeddings = %d ]' % embeddings.size(0))
62
63 # Sanity check dimensions
64 new_size = embeddings.size()
65 old_size = self.network.embedding.weight.size()
66 if new_size[1] != old_size[1]:
67 raise RuntimeError('Embedding dimensions do not match.')
68 if new_size[0] != old_size[0]:
69 logger.warning(
70 '[ WARNING: Number of embeddings changed (%d->%d) ]' %
71 (old_size[0], new_size[0])
72 )
73
74 # Swap weights
75 self.network.embedding.weight.data = embeddings
76
77 # If partially tuning the embeddings, keep the old values
78 if self.opt['tune_partial'] > 0:
79 if self.opt['tune_partial'] + 2 < embeddings.size(0):
80 fixed_embedding = embeddings[self.opt['tune_partial'] + 2:]
81 self.network.fixed_embedding = fixed_embedding
82
83 def update(self, ex):
84 # Train mode
85 self.network.train()
86
87 # Transfer to GPU
88 if self.opt['cuda']:
89 inputs = [Variable(e.cuda(non_blocking=True)) for e in ex[:5]]
90 target_s = Variable(ex[5].cuda(non_blocking=True))
91 target_e = Variable(ex[6].cuda(non_blocking=True))
92 else:
93 inputs = [Variable(e) for e in ex[:5]]
94 target_s = Variable(ex[5])
95 target_e = Variable(ex[6])
96
97 # Run forward
98 score_s, score_e = self.network(*inputs)
99
100 # Compute loss and accuracies
101 loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)
102 self.train_loss.update(loss.data[0], ex[0].size(0))
103
104 # Clear gradients and run backward
105 self.optimizer.zero_grad()
106 loss.backward()
107
108 # Clip gradients
109 torch.nn.utils.clip_grad_norm(self.network.parameters(),
110 self.opt['grad_clipping'])
111
112 # Update parameters
113 self.optimizer.step()
114 self.updates += 1
115
116 # Reset any partially fixed parameters (e.g. rare words)
117 self.reset_parameters()
118
119 def predict(self, ex):
120 # Eval mode
121 self.network.eval()
122
123 # Transfer to GPU
124 if self.opt['cuda']:
125 inputs = [Variable(e.cuda(non_blocking=True), volatile=True)
126 for e in ex[:5]]
127 else:
128 inputs = [Variable(e, volatile=True) for e in ex[:5]]
129
130 # Run forward
131 score_s, score_e = self.network(*inputs)
132
133 # Transfer to CPU/normal tensors for numpy ops
134 score_s = score_s.data.cpu()
135 score_e = score_e.data.cpu()
136
137 # Get argmax text spans
138 text = ex[-2]
139 spans = ex[-1]
140 predictions = []
141 pred_scores = []
142 max_len = self.opt['max_len'] or score_s.size(1)
143 for i in range(score_s.size(0)):
144 scores = torch.ger(score_s[i], score_e[i])
145 scores.triu_().tril_(max_len - 1)
146 scores = scores.numpy()
147 s_idx, e_idx = np.unravel_index(np.argmax(scores), scores.shape)
148 s_offset, e_offset = spans[i][s_idx][0], spans[i][e_idx][1]
149 predictions.append(text[i][s_offset:e_offset])
150 pred_scores.append(np.max(scores))
151
152 return predictions, pred_scores
153
154 def reset_parameters(self):
155 # Reset fixed embeddings to original value
156 if self.opt['tune_partial'] > 0:
157 offset = self.opt['tune_partial'] + 2
158 if offset < self.network.embedding.weight.data.size(0):
159 self.network.embedding.weight.data[offset:] \
160 = self.network.fixed_embedding
161
162 def save(self, filename):
163 params = {
164 'state_dict': {
165 'network': self.network.state_dict(),
166 },
167 'feature_dict': self.feature_dict,
168 'config': self.opt,
169 }
170 try:
171 torch.save(params, filename)
172 except BaseException:
173 logger.warn('[ WARN: Saving failed... continuing anyway. ]')
174
175 def cuda(self):
176 self.network.cuda()
177
[end of parlai/agents/drqa/model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parlai/agents/drqa/model.py b/parlai/agents/drqa/model.py
--- a/parlai/agents/drqa/model.py
+++ b/parlai/agents/drqa/model.py
@@ -99,7 +99,7 @@
# Compute loss and accuracies
loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)
- self.train_loss.update(loss.data[0], ex[0].size(0))
+ self.train_loss.update(loss.data.item(), ex[0].size(0))
# Clear gradients and run backward
self.optimizer.zero_grad()
| {"golden_diff": "diff --git a/parlai/agents/drqa/model.py b/parlai/agents/drqa/model.py\n--- a/parlai/agents/drqa/model.py\n+++ b/parlai/agents/drqa/model.py\n@@ -99,7 +99,7 @@\n \n # Compute loss and accuracies\n loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)\n- self.train_loss.update(loss.data[0], ex[0].size(0))\n+ self.train_loss.update(loss.data.item(), ex[0].size(0))\n \n # Clear gradients and run backward\n self.optimizer.zero_grad()\n", "issue": "Error when running DrQA PyTorch 1.0.0\nWhen running the basic example on SQUAD \r\n```python examples/train_model.py -m drqa -t squad -bs 32```\r\nThrowing this. \r\n```[ training... ]\r\n/content/DuReader/data/ParlAI/parlai/agents/drqa/layers.py:177: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\r\n alpha_flat = F.softmax(scores.view(-1, y.size(1)))\r\n/content/DuReader/data/ParlAI/parlai/agents/drqa/layers.py:237: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\r\n alpha = F.softmax(scores)\r\n/content/DuReader/data/ParlAI/parlai/agents/drqa/layers.py:210: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.\r\n alpha = F.log_softmax(xWy)\r\nTraceback (most recent call last):\r\n File \"examples/train_model.py\", line 16, in <module>\r\n TrainLoop(opt).train()\r\n File \"/content/DuReader/data/ParlAI/parlai/scripts/train_model.py\", line 500, in train\r\n world.parley()\r\n File \"/content/DuReader/data/ParlAI/parlai/core/worlds.py\", line 641, in parley\r\n batch_act = self.batch_act(agent_idx, batch_observations[agent_idx])\r\n File \"/content/DuReader/data/ParlAI/parlai/core/worlds.py\", line 614, in batch_act\r\n batch_actions = a.batch_act(batch_observation)\r\n File \"/content/DuReader/data/ParlAI/parlai/agents/drqa/drqa.py\", line 227, in batch_act\r\n self.model.update(batch)\r\n File \"/content/DuReader/data/ParlAI/parlai/agents/drqa/model.py\", line 102, in update\r\n self.train_loss.update(loss.data[0], ex[0].size(0))\r\nIndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number\r\n```\r\nI think this is related to the new version of PyTorch and the fix should be fairly trivial. Anyone working on fixing it soon? If not I could probably do a PR. \n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport torch\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\nimport logging\n\nfrom torch.autograd import Variable\nfrom .utils import load_embeddings, AverageMeter\nfrom .rnn_reader import RnnDocReader\n\nlogger = logging.getLogger('DrQA')\n\n\nclass DocReaderModel(object):\n \"\"\"High level model that handles intializing the underlying network\n architecture, saving, updating examples, and predicting examples.\n \"\"\"\n\n def __init__(self, opt, word_dict, feature_dict, state_dict=None):\n # Book-keeping.\n self.opt = opt\n self.word_dict = word_dict\n self.feature_dict = feature_dict\n self.updates = 0\n self.train_loss = AverageMeter()\n\n # Building network.\n self.network = RnnDocReader(opt)\n if state_dict:\n new_state = set(self.network.state_dict().keys())\n for k in list(state_dict['network'].keys()):\n if k not in new_state:\n del state_dict['network'][k]\n self.network.load_state_dict(state_dict['network'])\n\n # Building optimizer.\n parameters = [p for p in self.network.parameters() if p.requires_grad]\n if opt['optimizer'] == 'sgd':\n self.optimizer = optim.SGD(parameters, opt['learning_rate'],\n momentum=opt['momentum'],\n weight_decay=opt['weight_decay'])\n elif opt['optimizer'] == 'adamax':\n self.optimizer = optim.Adamax(parameters,\n weight_decay=opt['weight_decay'])\n else:\n raise RuntimeError('Unsupported optimizer: %s' % opt['optimizer'])\n\n def set_embeddings(self):\n # Read word embeddings.\n if not self.opt.get('embedding_file'):\n logger.warning('[ WARNING: No embeddings provided. '\n 'Keeping random initialization. ]')\n return\n logger.info('[ Loading pre-trained embeddings ]')\n embeddings = load_embeddings(self.opt, self.word_dict)\n logger.info('[ Num embeddings = %d ]' % embeddings.size(0))\n\n # Sanity check dimensions\n new_size = embeddings.size()\n old_size = self.network.embedding.weight.size()\n if new_size[1] != old_size[1]:\n raise RuntimeError('Embedding dimensions do not match.')\n if new_size[0] != old_size[0]:\n logger.warning(\n '[ WARNING: Number of embeddings changed (%d->%d) ]' %\n (old_size[0], new_size[0])\n )\n\n # Swap weights\n self.network.embedding.weight.data = embeddings\n\n # If partially tuning the embeddings, keep the old values\n if self.opt['tune_partial'] > 0:\n if self.opt['tune_partial'] + 2 < embeddings.size(0):\n fixed_embedding = embeddings[self.opt['tune_partial'] + 2:]\n self.network.fixed_embedding = fixed_embedding\n\n def update(self, ex):\n # Train mode\n self.network.train()\n\n # Transfer to GPU\n if self.opt['cuda']:\n inputs = [Variable(e.cuda(non_blocking=True)) for e in ex[:5]]\n target_s = Variable(ex[5].cuda(non_blocking=True))\n target_e = Variable(ex[6].cuda(non_blocking=True))\n else:\n inputs = [Variable(e) for e in ex[:5]]\n target_s = Variable(ex[5])\n target_e = Variable(ex[6])\n\n # Run forward\n score_s, score_e = self.network(*inputs)\n\n # Compute loss and accuracies\n loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)\n self.train_loss.update(loss.data[0], ex[0].size(0))\n\n # Clear gradients and run backward\n self.optimizer.zero_grad()\n loss.backward()\n\n # Clip gradients\n torch.nn.utils.clip_grad_norm(self.network.parameters(),\n self.opt['grad_clipping'])\n\n # Update parameters\n self.optimizer.step()\n self.updates += 1\n\n # Reset any partially fixed parameters (e.g. 
rare words)\n self.reset_parameters()\n\n def predict(self, ex):\n # Eval mode\n self.network.eval()\n\n # Transfer to GPU\n if self.opt['cuda']:\n inputs = [Variable(e.cuda(non_blocking=True), volatile=True)\n for e in ex[:5]]\n else:\n inputs = [Variable(e, volatile=True) for e in ex[:5]]\n\n # Run forward\n score_s, score_e = self.network(*inputs)\n\n # Transfer to CPU/normal tensors for numpy ops\n score_s = score_s.data.cpu()\n score_e = score_e.data.cpu()\n\n # Get argmax text spans\n text = ex[-2]\n spans = ex[-1]\n predictions = []\n pred_scores = []\n max_len = self.opt['max_len'] or score_s.size(1)\n for i in range(score_s.size(0)):\n scores = torch.ger(score_s[i], score_e[i])\n scores.triu_().tril_(max_len - 1)\n scores = scores.numpy()\n s_idx, e_idx = np.unravel_index(np.argmax(scores), scores.shape)\n s_offset, e_offset = spans[i][s_idx][0], spans[i][e_idx][1]\n predictions.append(text[i][s_offset:e_offset])\n pred_scores.append(np.max(scores))\n\n return predictions, pred_scores\n\n def reset_parameters(self):\n # Reset fixed embeddings to original value\n if self.opt['tune_partial'] > 0:\n offset = self.opt['tune_partial'] + 2\n if offset < self.network.embedding.weight.data.size(0):\n self.network.embedding.weight.data[offset:] \\\n = self.network.fixed_embedding\n\n def save(self, filename):\n params = {\n 'state_dict': {\n 'network': self.network.state_dict(),\n },\n 'feature_dict': self.feature_dict,\n 'config': self.opt,\n }\n try:\n torch.save(params, filename)\n except BaseException:\n logger.warn('[ WARN: Saving failed... continuing anyway. ]')\n\n def cuda(self):\n self.network.cuda()\n", "path": "parlai/agents/drqa/model.py"}]} | 2,884 | 149 |
gh_patches_debug_37093 | rasdani/github-patches | git_diff | TheAlgorithms__Python-2221 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
maths/number_of_digits.py is naive!
<code>[maths/number_of_digits.py](https://github.com/TheAlgorithms/Python/blob/master/maths/number_of_digits.py)</code> has a naive method. The suggested method has a time complexity of O(n). We can count the number of digits of a number in O(1).
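One possible O(1) sketch (hypothetical helper name; `log10` is undefined at 0, so that case is special-cased, and `abs()` handles negatives):
```python
import math

def num_digits_constant(n: int) -> int:
    # O(1) digit count; ignores floating-point edge cases for very large ints
    if n == 0:
        return 1
    return math.floor(math.log10(abs(n))) + 1
```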
</issue>
<code>
[start of maths/number_of_digits.py]
1 def num_digits(n: int) -> int:
2 """
3 Find the number of digits in a number.
4
5 >>> num_digits(12345)
6 5
7 >>> num_digits(123)
8 3
9 """
10 digits = 0
11 while n > 0:
12 n = n // 10
13 digits += 1
14 return digits
15
16
17 if __name__ == "__main__":
18 print(num_digits(12345)) # ===> 5
19
[end of maths/number_of_digits.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/maths/number_of_digits.py b/maths/number_of_digits.py
--- a/maths/number_of_digits.py
+++ b/maths/number_of_digits.py
@@ -1,3 +1,7 @@
+import math
+from timeit import timeit
+
+
def num_digits(n: int) -> int:
"""
Find the number of digits in a number.
@@ -14,5 +18,82 @@
return digits
+def num_digits_fast(n: int) -> int:
+ """
+ Find the number of digits in a number.
+ abs() is used as logarithm for negative numbers is not defined.
+
+ >>> num_digits_fast(12345)
+ 5
+ >>> num_digits_fast(123)
+ 3
+ """
+ return (math.floor(math.log(abs(n), 10) + 1))
+
+
+def num_digits_faster(n: int) -> int:
+ """
+ Find the number of digits in a number.
+ abs() is used for negative numbers
+
+ >>> num_digits_faster(12345)
+ 5
+ >>> num_digits_faster(123)
+ 3
+ """
+ return (len(str(abs(n))))
+
+
+def benchmark() -> None:
+ """
+ Benchmark code for comparing 3 functions,
+ with 3 different length int values.
+ """
+ print('\nFor small_num = ', small_num, ':')
+ print("> num_digits()",
+ '\t\tans =', num_digits(small_num),
+ '\ttime =', timeit("z.num_digits(z.small_num)",
+ setup="import __main__ as z"), "seconds")
+ print("> num_digits_fast()",
+ '\tans =', num_digits_fast(small_num),
+ '\ttime =', timeit("z.num_digits_fast(z.small_num)",
+ setup="import __main__ as z"), "seconds")
+ print("> num_digits_faster()",
+ '\tans =', num_digits_faster(small_num),
+ '\ttime =', timeit("z.num_digits_faster(z.small_num)",
+ setup="import __main__ as z"), "seconds")
+
+ print('\nFor medium_num = ', medium_num, ':')
+ print("> num_digits()",
+ '\t\tans =', num_digits(medium_num),
+ '\ttime =', timeit("z.num_digits(z.medium_num)",
+ setup="import __main__ as z"), "seconds")
+ print("> num_digits_fast()",
+ '\tans =', num_digits_fast(medium_num),
+ '\ttime =', timeit("z.num_digits_fast(z.medium_num)",
+ setup="import __main__ as z"), "seconds")
+ print("> num_digits_faster()",
+ '\tans =', num_digits_faster(medium_num),
+ '\ttime =', timeit("z.num_digits_faster(z.medium_num)",
+ setup="import __main__ as z"), "seconds")
+
+ print('\nFor large_num = ', large_num, ':')
+ print("> num_digits()",
+ '\t\tans =', num_digits(large_num),
+ '\ttime =', timeit("z.num_digits(z.large_num)",
+ setup="import __main__ as z"), "seconds")
+ print("> num_digits_fast()",
+ '\tans =', num_digits_fast(large_num),
+ '\ttime =', timeit("z.num_digits_fast(z.large_num)",
+ setup="import __main__ as z"), "seconds")
+ print("> num_digits_faster()",
+ '\tans =', num_digits_faster(large_num),
+ '\ttime =', timeit("z.num_digits_faster(z.large_num)",
+ setup="import __main__ as z"), "seconds")
+
+
if __name__ == "__main__":
- print(num_digits(12345)) # ===> 5
+ small_num = 262144
+ medium_num = 1125899906842624
+ large_num = 1267650600228229401496703205376
+ benchmark()
| {"golden_diff": "diff --git a/maths/number_of_digits.py b/maths/number_of_digits.py\n--- a/maths/number_of_digits.py\n+++ b/maths/number_of_digits.py\n@@ -1,3 +1,7 @@\n+import math\n+from timeit import timeit\n+\n+\n def num_digits(n: int) -> int:\n \"\"\"\n Find the number of digits in a number.\n@@ -14,5 +18,82 @@\n return digits\n \n \n+def num_digits_fast(n: int) -> int:\n+ \"\"\"\n+ Find the number of digits in a number.\n+ abs() is used as logarithm for negative numbers is not defined.\n+\n+ >>> num_digits_fast(12345)\n+ 5\n+ >>> num_digits_fast(123)\n+ 3\n+ \"\"\"\n+ return (math.floor(math.log(abs(n), 10) + 1))\n+\n+\n+def num_digits_faster(n: int) -> int:\n+ \"\"\"\n+ Find the number of digits in a number.\n+ abs() is used for negative numbers\n+\n+ >>> num_digits_faster(12345)\n+ 5\n+ >>> num_digits_faster(123)\n+ 3\n+ \"\"\"\n+ return (len(str(abs(n))))\n+\n+\n+def benchmark() -> None:\n+ \"\"\"\n+ Benchmark code for comparing 3 functions,\n+ with 3 different length int values.\n+ \"\"\"\n+ print('\\nFor small_num = ', small_num, ':')\n+ print(\"> num_digits()\",\n+ '\\t\\tans =', num_digits(small_num),\n+ '\\ttime =', timeit(\"z.num_digits(z.small_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+ print(\"> num_digits_fast()\",\n+ '\\tans =', num_digits_fast(small_num),\n+ '\\ttime =', timeit(\"z.num_digits_fast(z.small_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+ print(\"> num_digits_faster()\",\n+ '\\tans =', num_digits_faster(small_num),\n+ '\\ttime =', timeit(\"z.num_digits_faster(z.small_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+\n+ print('\\nFor medium_num = ', medium_num, ':')\n+ print(\"> num_digits()\",\n+ '\\t\\tans =', num_digits(medium_num),\n+ '\\ttime =', timeit(\"z.num_digits(z.medium_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+ print(\"> num_digits_fast()\",\n+ '\\tans =', num_digits_fast(medium_num),\n+ '\\ttime =', timeit(\"z.num_digits_fast(z.medium_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+ print(\"> num_digits_faster()\",\n+ '\\tans =', num_digits_faster(medium_num),\n+ '\\ttime =', timeit(\"z.num_digits_faster(z.medium_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+\n+ print('\\nFor large_num = ', large_num, ':')\n+ print(\"> num_digits()\",\n+ '\\t\\tans =', num_digits(large_num),\n+ '\\ttime =', timeit(\"z.num_digits(z.large_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+ print(\"> num_digits_fast()\",\n+ '\\tans =', num_digits_fast(large_num),\n+ '\\ttime =', timeit(\"z.num_digits_fast(z.large_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+ print(\"> num_digits_faster()\",\n+ '\\tans =', num_digits_faster(large_num),\n+ '\\ttime =', timeit(\"z.num_digits_faster(z.large_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+\n+\n if __name__ == \"__main__\":\n- print(num_digits(12345)) # ===> 5\n+ small_num = 262144\n+ medium_num = 1125899906842624\n+ large_num = 1267650600228229401496703205376\n+ benchmark()\n", "issue": "maths/number_of_digits.py is naive!\n<code>[maths/number_of_digits.py](https://github.com/TheAlgorithms/Python/blob/master/maths/number_of_digits.py)</code> has a naive method. The suggested method has a time complexity of O(n). 
We can count number of digits of a number in O(1).\n", "before_files": [{"content": "def num_digits(n: int) -> int:\n \"\"\"\n Find the number of digits in a number.\n\n >>> num_digits(12345)\n 5\n >>> num_digits(123)\n 3\n \"\"\"\n digits = 0\n while n > 0:\n n = n // 10\n digits += 1\n return digits\n\n\nif __name__ == \"__main__\":\n print(num_digits(12345)) # ===> 5\n", "path": "maths/number_of_digits.py"}]} | 750 | 977 |
gh_patches_debug_1975 | rasdani/github-patches | git_diff | jupyter__docker-stacks-1859 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] - Health Check fails if you change the port jupyter runs on
### What docker image(s) are you using?
minimal-notebook
### OS system and architecture running docker image
RHEL7 docker swarm
### What Docker command are you running?
Not really relevant, but I need to run it in a docker swarm, with a generalised 'ingress service'.
For this, the internal port Jupyter runs on needs to be changed for integration into an 'ingress proxy'.
To change the port I made a slight modification to the docker image to set the internal port it runs on (see below).
The problem is that the docker container dies unexpectedly after running for 46 seconds.
During that time the service is visible within the container, but not from outside the container.
This is because the built-in healthcheck never succeeds, and it eventually kills the container with little logged reporting.
(see below)
### How to Reproduce the problem?
Dockerfile, to set port
```dockerfile
FROM "jupyter/minimal-notebook:latest"
# Update Jupyter configuration to set port
RUN set -eux; \
sed -i 's/port = 8888/port = 8080/' /etc/jupyter/jupyter_notebook_config.py ;\
sed -i 's/port = 8888/port = 8080/' /etc/jupyter/jupyter_server_config.py ;\
:;
```
You can also change the port in other ways, such as...
creating a `~jovyan/.jupyter/jupyter_server_config.py` file (which can also set a password), as sketched below,
or setting a JUPYTER_PORT environment variable (if the settings in the `/etc/jupyter` configs are removed)
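A minimal sketch of that config-file variant (assuming the file is picked up by the server; same effect as the sed above):
```python
# ~jovyan/.jupyter/jupyter_server_config.py
c = get_config()  # noqa: F821  (injected by the traitlets config loader)
c.ServerApp.port = 8080
```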
### Command output
When you build and then run the modified docker image, `docker ps` reports
`Up 9 seconds (health: starting) 8888/tcp` despite the fact that jupyter is now running on port 8080
46 seconds after starting, the container dies with an unhelpful (Signal 15).
Log output...
```
[I 2022-10-28 05:20:00.393 ServerApp] Jupyter Server 1.21.0 is running at:
[I 2022-10-28 05:20:00.393 ServerApp] http://jupyter_service:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c
or http://127.0.0.1:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c
[I 2022-10-28 05:20:00.393 ServerApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).
[C 2022-10-28 05:20:00.397 ServerApp]
To access the server, open this file in a browser:
file:///home/jovyan/.local/share/jupyter/runtime/jpserver-8-open.html
Or copy and paste one of these URLs:
http://jupyter_service:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c
or http://127.0.0.1:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c
Entered start.sh with args: jupyter lab
Executing the command: jupyter lab
[C 2022-10-28 05:20:46.261 ServerApp] received signal 15, stopping
[I 2022-10-28 05:20:46.262 ServerApp] Shutting down 2 extensions
[I 2022-10-28 05:20:46.263 ServerApp] Shutting down 0 terminals
```
### Expected behavior
Changing the internal port should not take days of work to track down; it should be straightforward and documented.
The healthcheck should also be properly documented in jupyter-stacks documentation.
This will make it more 'swarm friendly' as well as allow others to integrate it better when port 8888 is NOT available.
Yes you can map the port when doing a 'docker run', but that is NOT always possible.
### Actual behavior
Internal port changing is undocumented in the stacks documentation.
The healthcheck kills the container without notice (signal 15 hardly makes it clear) when the port is different.
Days of work were lost trying to figure out what should be a straightforward and simple task.
### Anything else?
There is an existing environment variable "JUPYTER_PORT" that defines the default port.
But any such setting is currently overridden by the configuration files in `/etc/jupyter`
This may be usable to set the healthcheck, especially if the config-file default is removed or made to allow the env var to override it (see the sketch after the Dockerfile snippet below).
in Dockerfile....
```
HEALTHCHECK --interval=15s --timeout=3s --start-period=5s --retries=3 \
CMD wget -O- --no-verbose --tries=1 --no-check-certificate \
http${GEN_CERT:+s}://localhost:${JUPYTER_PORT:-8888}${JUPYTERHUB_SERVICE_PREFIX:-/}api || exit 1
```
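On the config side, one possible sketch of letting that env var win (an assumption about how the packaged config could be changed, not how it currently behaves):
```python
# sketch: make /etc/jupyter/jupyter_server_config.py honour JUPYTER_PORT
import os

c = get_config()  # noqa: F821
c.ServerApp.port = int(os.environ.get("JUPYTER_PORT", 8888))
```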
That environment variable also needs to be documented in the jupyter-stacks documentation, with the health check.
</issue>
<code>
[start of base-notebook/jupyter_server_config.py]
1 # Copyright (c) Jupyter Development Team.
2 # Distributed under the terms of the Modified BSD License.
3 # mypy: ignore-errors
4 import os
5 import stat
6 import subprocess
7
8 from jupyter_core.paths import jupyter_data_dir
9
10 c = get_config() # noqa: F821
11 c.ServerApp.ip = "0.0.0.0"
12 c.ServerApp.port = 8888
13 c.ServerApp.open_browser = False
14
15 # to output both image/svg+xml and application/pdf plot formats in the notebook file
16 c.InlineBackend.figure_formats = {"png", "jpeg", "svg", "pdf"}
17
18 # https://github.com/jupyter/notebook/issues/3130
19 c.FileContentsManager.delete_to_trash = False
20
21 # Generate a self-signed certificate
22 OPENSSL_CONFIG = """\
23 [req]
24 distinguished_name = req_distinguished_name
25 [req_distinguished_name]
26 """
27 if "GEN_CERT" in os.environ:
28 dir_name = jupyter_data_dir()
29 pem_file = os.path.join(dir_name, "notebook.pem")
30 os.makedirs(dir_name, exist_ok=True)
31
32 # Generate an openssl.cnf file to set the distinguished name
33 cnf_file = os.path.join(os.getenv("CONDA_DIR", "/usr/lib"), "ssl", "openssl.cnf")
34 if not os.path.isfile(cnf_file):
35 with open(cnf_file, "w") as fh:
36 fh.write(OPENSSL_CONFIG)
37
38 # Generate a certificate if one doesn't exist on disk
39 subprocess.check_call(
40 [
41 "openssl",
42 "req",
43 "-new",
44 "-newkey=rsa:2048",
45 "-days=365",
46 "-nodes",
47 "-x509",
48 "-subj=/C=XX/ST=XX/L=XX/O=generated/CN=generated",
49 f"-keyout={pem_file}",
50 f"-out={pem_file}",
51 ]
52 )
53 # Restrict access to the file
54 os.chmod(pem_file, stat.S_IRUSR | stat.S_IWUSR)
55 c.ServerApp.certfile = pem_file
56
57 # Change default umask for all subprocesses of the notebook server if set in
58 # the environment
59 if "NB_UMASK" in os.environ:
60 os.umask(int(os.environ["NB_UMASK"], 8))
61
[end of base-notebook/jupyter_server_config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

Golden diff:
diff --git a/base-notebook/jupyter_server_config.py b/base-notebook/jupyter_server_config.py
--- a/base-notebook/jupyter_server_config.py
+++ b/base-notebook/jupyter_server_config.py
@@ -9,7 +9,6 @@
c = get_config() # noqa: F821
c.ServerApp.ip = "0.0.0.0"
-c.ServerApp.port = 8888
c.ServerApp.open_browser = False
# to output both image/svg+xml and application/pdf plot formats in the notebook file
| {"golden_diff": "diff --git a/base-notebook/jupyter_server_config.py b/base-notebook/jupyter_server_config.py\n--- a/base-notebook/jupyter_server_config.py\n+++ b/base-notebook/jupyter_server_config.py\n@@ -9,7 +9,6 @@\n \n c = get_config() # noqa: F821\n c.ServerApp.ip = \"0.0.0.0\"\n-c.ServerApp.port = 8888\n c.ServerApp.open_browser = False\n \n # to output both image/svg+xml and application/pdf plot formats in the notebook file\n", "issue": "[BUG] - Health Check fails if you change the port jupyter runs on\n### What docker image(s) are you using?\r\n\r\nminimal-notebook\r\n\r\n### OS system and architecture running docker image\r\n\r\nRHEL7 docker swarm\r\n\r\n### What Docker command are you running?\r\n\r\nNot really relevant, but I need to run it in a docker swarm, with a generalise 'ingress service'.\r\n\r\nFor this I needed to change internal port jupyter runs on needs to be changes for intergation into a 'ingress proxy'\r\nTo change the port I made a slight modification the docker image to set the internal port it runs on (see below)\r\n\r\nThe problem is the docker container dies unexpectedly after running for 46 seconds.\r\nDuring that time the service is visible within the conatiner, but not external to the container.\r\nThis is because the built-in heathcheck never succeeds, and eventually kills the container with little logged reporting.\r\n(see below)\r\n\r\n\r\n### How to Reproduce the problem?\r\n\r\nDockerfile, to set port\r\n\r\n```dockerfile\r\nFROM \"jupyter/minimal-notebook:latest\"\r\n# Update Jupyter configuration to set port\r\nRUN set -eux; \\\r\n sed -i 's/port = 8888/port = 8080/' /etc/jupyter/jupyter_notebook_config.py ;\\\r\n sed -i 's/port = 8888/port = 8080/' /etc/jupyter/jupyter_server_config.py ;\\\r\n :;\r\n```\r\nYou can also change the port in other ways such as... 
\r\n\r\nCreating a `~joyvan/.jupyter/jupyter_server_config.py` file (which can also set a password)\r\n\r\nor setting a JUPYTER_PORT environment variable (IF the setting in `/etc/jupyter` configs are removed)\r\n\r\n\r\n\r\n### Command output\r\n\r\nWhen you build and then run the modified docker image, `docker ps` reports\r\n`Up 9 seconds (health: starting) 8888/tcp` despite the fact that jupyter is now running on port 8080\r\n\r\n46 seconds after starting the container dies with a unhelpful (Signal 15)\r\n\r\nLog output...\r\n```\r\n[I 2022-10-28 05:20:00.393 ServerApp] Jupyter Server 1.21.0 is running at:\r\n[I 2022-10-28 05:20:00.393 ServerApp] http://jupyter_service:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c\r\nor http://127.0.0.1:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c\r\n[I 2022-10-28 05:20:00.393 ServerApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).\r\n[C 2022-10-28 05:20:00.397 ServerApp] \r\n\r\nTo access the server, open this file in a browser:\r\nfile:///home/jovyan/.local/share/jupyter/runtime/jpserver-8-open.html\r\nOr copy and paste one of these URLs:\r\nhttp://jupyter_service:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c\r\nor http://127.0.0.1:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c\r\nEntered start.sh with args: jupyter lab\r\nExecuting the command: jupyter lab\r\n[C 2022-10-28 05:20:46.261 ServerApp] received signal 15, stopping\r\n[I 2022-10-28 05:20:46.262 ServerApp] Shutting down 2 extensions\r\n[I 2022-10-28 05:20:46.263 ServerApp] Shutting down 0 terminals\r\n```\r\n\r\n### Expected behavior\r\n\r\nChanging the internal port should not take days of work to track down, it should be straight forward and documented.\r\n\r\nThe healthcheck should also be properly documented in jupyter-stacks documentation.\r\nThis will make it more 'swarm friendly' as well as allow others to integrate it better when port 8888 is NOT available.\r\n\r\nYes you can map the port when doing a 'docker run', but that is NOT always possible.\r\n\r\n### Actual behavior\r\n\r\nInternal Port changing is undocumented in stacks\r\n\r\nHeathcheck kills the container without notice (signal 15 hardly makes it clear) when port is different.\r\n\r\nDays of work lost trying to figure out what should be a straight forward and simple task.\r\n\r\n### Anything else?\r\n\r\nThere is an existing environment variable \"JUPYTER_PORT\" that defines the default port.\r\nBut any such setting is currently overridden by the configuration files in `/etc/jupyter`\r\n\r\nThis may be usable to set healthcheck, especially if the config file default is removed, or allows the env var to override.\r\n\r\nin Dockerfile....\r\n```\r\nHEALTHCHECK --interval=15s --timeout=3s --start-period=5s --retries=3 \\\r\n CMD wget -O- --no-verbose --tries=1 --no-check-certificate \\\r\n http${GEN_CERT:+s}://localhost:${JUPYTER_PORT:-8888}${JUPYTERHUB_SERVICE_PREFIX:-/}api || exit 1\r\n```\r\n\r\nThat Environment variable also needs to be documented in the jupyter-stacks documentation, with the health check.\n[BUG] - Health Check fails if you change the port jupyter runs on\n### What docker image(s) are you using?\r\n\r\nminimal-notebook\r\n\r\n### OS system and architecture running docker image\r\n\r\nRHEL7 docker swarm\r\n\r\n### What Docker command are you running?\r\n\r\nNot really relevant, but I need to run it in a docker swarm, with a generalise 'ingress service'.\r\n\r\nFor this I needed to change 
internal port jupyter runs on needs to be changes for intergation into a 'ingress proxy'\r\nTo change the port I made a slight modification the docker image to set the internal port it runs on (see below)\r\n\r\nThe problem is the docker container dies unexpectedly after running for 46 seconds.\r\nDuring that time the service is visible within the conatiner, but not external to the container.\r\nThis is because the built-in heathcheck never succeeds, and eventually kills the container with little logged reporting.\r\n(see below)\r\n\r\n\r\n### How to Reproduce the problem?\r\n\r\nDockerfile, to set port\r\n\r\n```dockerfile\r\nFROM \"jupyter/minimal-notebook:latest\"\r\n# Update Jupyter configuration to set port\r\nRUN set -eux; \\\r\n sed -i 's/port = 8888/port = 8080/' /etc/jupyter/jupyter_notebook_config.py ;\\\r\n sed -i 's/port = 8888/port = 8080/' /etc/jupyter/jupyter_server_config.py ;\\\r\n :;\r\n```\r\nYou can also change the port in other ways such as... \r\n\r\nCreating a `~joyvan/.jupyter/jupyter_server_config.py` file (which can also set a password)\r\n\r\nor setting a JUPYTER_PORT environment variable (IF the setting in `/etc/jupyter` configs are removed)\r\n\r\n\r\n\r\n### Command output\r\n\r\nWhen you build and then run the modified docker image, `docker ps` reports\r\n`Up 9 seconds (health: starting) 8888/tcp` despite the fact that jupyter is now running on port 8080\r\n\r\n46 seconds after starting the container dies with a unhelpful (Signal 15)\r\n\r\nLog output...\r\n```\r\n[I 2022-10-28 05:20:00.393 ServerApp] Jupyter Server 1.21.0 is running at:\r\n[I 2022-10-28 05:20:00.393 ServerApp] http://jupyter_service:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c\r\nor http://127.0.0.1:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c\r\n[I 2022-10-28 05:20:00.393 ServerApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).\r\n[C 2022-10-28 05:20:00.397 ServerApp] \r\n\r\nTo access the server, open this file in a browser:\r\nfile:///home/jovyan/.local/share/jupyter/runtime/jpserver-8-open.html\r\nOr copy and paste one of these URLs:\r\nhttp://jupyter_service:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c\r\nor http://127.0.0.1:8080/lab?token=929eaaf2e60f8a947761cb1f2741d745fd46dde62f6fef7c\r\nEntered start.sh with args: jupyter lab\r\nExecuting the command: jupyter lab\r\n[C 2022-10-28 05:20:46.261 ServerApp] received signal 15, stopping\r\n[I 2022-10-28 05:20:46.262 ServerApp] Shutting down 2 extensions\r\n[I 2022-10-28 05:20:46.263 ServerApp] Shutting down 0 terminals\r\n```\r\n\r\n### Expected behavior\r\n\r\nChanging the internal port should not take days of work to track down, it should be straight forward and documented.\r\n\r\nThe healthcheck should also be properly documented in jupyter-stacks documentation.\r\nThis will make it more 'swarm friendly' as well as allow others to integrate it better when port 8888 is NOT available.\r\n\r\nYes you can map the port when doing a 'docker run', but that is NOT always possible.\r\n\r\n### Actual behavior\r\n\r\nInternal Port changing is undocumented in stacks\r\n\r\nHeathcheck kills the container without notice (signal 15 hardly makes it clear) when port is different.\r\n\r\nDays of work lost trying to figure out what should be a straight forward and simple task.\r\n\r\n### Anything else?\r\n\r\nThere is an existing environment variable \"JUPYTER_PORT\" that defines the default port.\r\nBut any such setting is currently overridden by the 
configuration files in `/etc/jupyter`\r\n\r\nThis may be usable to set healthcheck, especially if the config file default is removed, or allows the env var to override.\r\n\r\nin Dockerfile....\r\n```\r\nHEALTHCHECK --interval=15s --timeout=3s --start-period=5s --retries=3 \\\r\n CMD wget -O- --no-verbose --tries=1 --no-check-certificate \\\r\n http${GEN_CERT:+s}://localhost:${JUPYTER_PORT:-8888}${JUPYTERHUB_SERVICE_PREFIX:-/}api || exit 1\r\n```\r\n\r\nThat Environment variable also needs to be documented in the jupyter-stacks documentation, with the health check.\n", "before_files": [{"content": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n# mypy: ignore-errors\nimport os\nimport stat\nimport subprocess\n\nfrom jupyter_core.paths import jupyter_data_dir\n\nc = get_config() # noqa: F821\nc.ServerApp.ip = \"0.0.0.0\"\nc.ServerApp.port = 8888\nc.ServerApp.open_browser = False\n\n# to output both image/svg+xml and application/pdf plot formats in the notebook file\nc.InlineBackend.figure_formats = {\"png\", \"jpeg\", \"svg\", \"pdf\"}\n\n# https://github.com/jupyter/notebook/issues/3130\nc.FileContentsManager.delete_to_trash = False\n\n# Generate a self-signed certificate\nOPENSSL_CONFIG = \"\"\"\\\n[req]\ndistinguished_name = req_distinguished_name\n[req_distinguished_name]\n\"\"\"\nif \"GEN_CERT\" in os.environ:\n dir_name = jupyter_data_dir()\n pem_file = os.path.join(dir_name, \"notebook.pem\")\n os.makedirs(dir_name, exist_ok=True)\n\n # Generate an openssl.cnf file to set the distinguished name\n cnf_file = os.path.join(os.getenv(\"CONDA_DIR\", \"/usr/lib\"), \"ssl\", \"openssl.cnf\")\n if not os.path.isfile(cnf_file):\n with open(cnf_file, \"w\") as fh:\n fh.write(OPENSSL_CONFIG)\n\n # Generate a certificate if one doesn't exist on disk\n subprocess.check_call(\n [\n \"openssl\",\n \"req\",\n \"-new\",\n \"-newkey=rsa:2048\",\n \"-days=365\",\n \"-nodes\",\n \"-x509\",\n \"-subj=/C=XX/ST=XX/L=XX/O=generated/CN=generated\",\n f\"-keyout={pem_file}\",\n f\"-out={pem_file}\",\n ]\n )\n # Restrict access to the file\n os.chmod(pem_file, stat.S_IRUSR | stat.S_IWUSR)\n c.ServerApp.certfile = pem_file\n\n# Change default umask for all subprocesses of the notebook server if set in\n# the environment\nif \"NB_UMASK\" in os.environ:\n os.umask(int(os.environ[\"NB_UMASK\"], 8))\n", "path": "base-notebook/jupyter_server_config.py"}]} | 3,832 | 119 |
gh_patches_debug_3043 | rasdani/github-patches | git_diff | docker__docker-py-1250

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
attach is causing an "Invalid Argument" exception from os.read
``` python
stream = client.attach(container, stream=True, stdout=True, stderr=True)
for chunk in stream:
pass
```
Results in:
```
File "/Users/michael/work/oss/marina/marina/build.py", line 695, in watcher
for chunk in stream:
File ".venv/lib/python3.5/site-packages/docker/utils/socket.py", line 67, in frames_iter
yield read(socket, n)
File ".venv/lib/python3.5/site-packages/docker/utils/socket.py", line 25, in read
return os.read(socket.fileno(), n)
OSError: [Errno 22] Invalid argument
```
Using docker-py 1.10.2 on OS X 10.11.6 with docker for mac 1.12.0-rc3. Reverting to 1.9.0 fixes the issue.
</issue>
<code>
[start of docker/utils/socket.py]
1 import errno
2 import os
3 import select
4 import struct
5
6 import six
7
8 try:
9 from ..transport import NpipeSocket
10 except ImportError:
11 NpipeSocket = type(None)
12
13
14 class SocketError(Exception):
15 pass
16
17
18 def read(socket, n=4096):
19 """
20 Reads at most n bytes from socket
21 """
22
23 recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
24
25 # wait for data to become available
26 if not isinstance(socket, NpipeSocket):
27 select.select([socket], [], [])
28
29 try:
30 if hasattr(socket, 'recv'):
31 return socket.recv(n)
32 return os.read(socket.fileno(), n)
33 except EnvironmentError as e:
34 if e.errno not in recoverable_errors:
35 raise
36
37
38 def read_exactly(socket, n):
39 """
40 Reads exactly n bytes from socket
41 Raises SocketError if there isn't enough data
42 """
43 data = six.binary_type()
44 while len(data) < n:
45 next_data = read(socket, n - len(data))
46 if not next_data:
47 raise SocketError("Unexpected EOF")
48 data += next_data
49 return data
50
51
52 def next_frame_size(socket):
53 """
54 Returns the size of the next frame of data waiting to be read from socket,
55 according to the protocol defined here:
56
57 https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container
58 """
59 try:
60 data = read_exactly(socket, 8)
61 except SocketError:
62 return 0
63
64 _, actual = struct.unpack('>BxxxL', data)
65 return actual
66
67
68 def frames_iter(socket):
69 """
70 Returns a generator of frames read from socket
71 """
72 n = next_frame_size(socket)
73 while n > 0:
74 yield read(socket, n)
75 n = next_frame_size(socket)
76
[end of docker/utils/socket.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

Golden diff:
diff --git a/docker/utils/socket.py b/docker/utils/socket.py
--- a/docker/utils/socket.py
+++ b/docker/utils/socket.py
@@ -69,7 +69,11 @@
"""
Returns a generator of frames read from socket
"""
- n = next_frame_size(socket)
- while n > 0:
- yield read(socket, n)
+ while True:
n = next_frame_size(socket)
+ if n == 0:
+ break
+ while n > 0:
+ result = read(socket, n)
+ n -= len(result)
+ yield result
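
Note on the fix above: the patched `frames_iter()` keeps calling `read()` until the frame size announced by the header has been consumed and terminates on a zero-size header, instead of assuming a single `read()` returns the whole frame. A small consumption sketch, reusing the `client`/`container` objects from the issue's reproduction (their setup is assumed):

```python
# Illustrative only: chunks yielded by the patched generator may be partial
# frames, so treat the attach stream as a plain iterator of byte chunks.
stream = client.attach(container, stream=True, stdout=True, stderr=True)
output = b"".join(chunk for chunk in stream)
print(output.decode("utf-8", errors="replace"))
```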
gh_patches_debug_23435 | rasdani/github-patches | git_diff | inventree__InvenTree-1934

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Heartbeat has a datetime warning
From the worker logs:
`inventree-worker | 00:51:15 [Q] INFO Process-1 created a task from schedule [InvenTree.tasks.heartbeat]
inventree-worker | 00:51:15 [Q] INFO Process-1:3 processing [may-cat-eleven-beer]
inventree-worker | Could not perform heartbeat task - App registry not ready
inventree-worker | /usr/lib/python3.8/site-packages/django/db/models/fields/__init__.py:1416: RuntimeWarning: DateTimeField Task.started received a naive datetime (2021-08-10 00:21:15.816018) while time zone support is active.
inventree-worker | warnings.warn("DateTimeField %s received a naive datetime (%s)"
`
The heartbeat task references a timezone-naive datetime, which Django complains about because USE_TZ is True in settings.py.
</issue>
<code>
[start of InvenTree/InvenTree/tasks.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 import re
5 import json
6 import requests
7 import logging
8
9 from datetime import datetime, timedelta
10
11 from django.core.exceptions import AppRegistryNotReady
12 from django.db.utils import OperationalError, ProgrammingError
13
14
15 logger = logging.getLogger("inventree")
16
17
18 def schedule_task(taskname, **kwargs):
19 """
20 Create a scheduled task.
21 If the task has already been scheduled, ignore!
22 """
23
24 # If unspecified, repeat indefinitely
25 repeats = kwargs.pop('repeats', -1)
26 kwargs['repeats'] = repeats
27
28 try:
29 from django_q.models import Schedule
30 except (AppRegistryNotReady):
31 logger.info("Could not start background tasks - App registry not ready")
32 return
33
34 try:
35 # If this task is already scheduled, don't schedule it again
36 # Instead, update the scheduling parameters
37 if Schedule.objects.filter(func=taskname).exists():
38 logger.info(f"Scheduled task '{taskname}' already exists - updating!")
39
40 Schedule.objects.filter(func=taskname).update(**kwargs)
41 else:
42 logger.info(f"Creating scheduled task '{taskname}'")
43
44 Schedule.objects.create(
45 name=taskname,
46 func=taskname,
47 **kwargs
48 )
49 except (OperationalError, ProgrammingError):
50 # Required if the DB is not ready yet
51 pass
52
53
54 def offload_task(taskname, force_sync=False, *args, **kwargs):
55 """
56 Create an AsyncTask if workers are running.
57 This is different to a 'scheduled' task,
58 in that it only runs once!
59
60 If workers are not running or force_sync flag
61 is set then the task is ran synchronously.
62 """
63
64 try:
65 from django_q.tasks import AsyncTask
66 except (AppRegistryNotReady):
67 logger.warning("Could not offload task - app registry not ready")
68 return
69 import importlib
70 from InvenTree.status import is_worker_running
71
72 if is_worker_running() and not force_sync:
73 # Running as asynchronous task
74 try:
75 task = AsyncTask(taskname, *args, **kwargs)
76 task.run()
77 except ImportError:
78 logger.warning(f"WARNING: '{taskname}' not started - Function not found")
79 else:
80 # Split path
81 try:
82 app, mod, func = taskname.split('.')
83 app_mod = app + '.' + mod
84 except ValueError:
85 logger.warning(f"WARNING: '{taskname}' not started - Malformed function path")
86 return
87
88 # Import module from app
89 try:
90 _mod = importlib.import_module(app_mod)
91 except ModuleNotFoundError:
92 logger.warning(f"WARNING: '{taskname}' not started - No module named '{app_mod}'")
93 return
94
95 # Retrieve function
96 try:
97 _func = getattr(_mod, func)
98 except AttributeError:
99 # getattr does not work for local import
100 _func = None
101
102 try:
103 if not _func:
104 _func = eval(func)
105 except NameError:
106 logger.warning(f"WARNING: '{taskname}' not started - No function named '{func}'")
107 return
108
109 # Workers are not running: run it as synchronous task
110 _func()
111
112
113 def heartbeat():
114 """
115 Simple task which runs at 5 minute intervals,
116 so we can determine that the background worker
117 is actually running.
118
119 (There is probably a less "hacky" way of achieving this)?
120 """
121
122 try:
123 from django_q.models import Success
124 logger.info("Could not perform heartbeat task - App registry not ready")
125 except AppRegistryNotReady:
126 return
127
128 threshold = datetime.now() - timedelta(minutes=30)
129
130 # Delete heartbeat results more than half an hour old,
131 # otherwise they just create extra noise
132 heartbeats = Success.objects.filter(
133 func='InvenTree.tasks.heartbeat',
134 started__lte=threshold
135 )
136
137 heartbeats.delete()
138
139
140 def delete_successful_tasks():
141 """
142 Delete successful task logs
143 which are more than a month old.
144 """
145
146 try:
147 from django_q.models import Success
148 except AppRegistryNotReady:
149 logger.info("Could not perform 'delete_successful_tasks' - App registry not ready")
150 return
151
152 threshold = datetime.now() - timedelta(days=30)
153
154 results = Success.objects.filter(
155 started__lte=threshold
156 )
157
158 results.delete()
159
160
161 def check_for_updates():
162 """
163 Check if there is an update for InvenTree
164 """
165
166 try:
167 import common.models
168 except AppRegistryNotReady:
169 # Apps not yet loaded!
170 logger.info("Could not perform 'check_for_updates' - App registry not ready")
171 return
172
173 response = requests.get('https://api.github.com/repos/inventree/inventree/releases/latest')
174
175 if not response.status_code == 200:
176 raise ValueError(f'Unexpected status code from GitHub API: {response.status_code}')
177
178 data = json.loads(response.text)
179
180 tag = data.get('tag_name', None)
181
182 if not tag:
183 raise ValueError("'tag_name' missing from GitHub response")
184
185 match = re.match(r"^.*(\d+)\.(\d+)\.(\d+).*$", tag)
186
187 if not len(match.groups()) == 3:
188 logger.warning(f"Version '{tag}' did not match expected pattern")
189 return
190
191 latest_version = [int(x) for x in match.groups()]
192
193 if not len(latest_version) == 3:
194 raise ValueError(f"Version '{tag}' is not correct format")
195
196 logger.info(f"Latest InvenTree version: '{tag}'")
197
198 # Save the version to the database
199 common.models.InvenTreeSetting.set_setting(
200 'INVENTREE_LATEST_VERSION',
201 tag,
202 None
203 )
204
205
206 def update_exchange_rates():
207 """
208 Update currency exchange rates
209 """
210
211 try:
212 from InvenTree.exchange import InvenTreeExchange
213 from djmoney.contrib.exchange.models import ExchangeBackend, Rate
214 from common.settings import currency_code_default, currency_codes
215 except AppRegistryNotReady:
216 # Apps not yet loaded!
217 logger.info("Could not perform 'update_exchange_rates' - App registry not ready")
218 return
219 except:
220 # Other error?
221 return
222
223 # Test to see if the database is ready yet
224 try:
225 backend = ExchangeBackend.objects.get(name='InvenTreeExchange')
226 except ExchangeBackend.DoesNotExist:
227 pass
228 except:
229 # Some other error
230 print("Database not ready")
231 return
232
233 backend = InvenTreeExchange()
234 print(f"Updating exchange rates from {backend.url}")
235
236 base = currency_code_default()
237
238 print(f"Using base currency '{base}'")
239
240 backend.update_rates(base_currency=base)
241
242 # Remove any exchange rates which are not in the provided currencies
243 Rate.objects.filter(backend="InvenTreeExchange").exclude(currency__in=currency_codes()).delete()
244
245
246 def send_email(subject, body, recipients, from_email=None):
247 """
248 Send an email with the specified subject and body,
249 to the specified recipients list.
250 """
251
252 if type(recipients) == str:
253 recipients = [recipients]
254
255 offload_task(
256 'django.core.mail.send_mail',
257 subject, body,
258 from_email,
259 recipients,
260 )
261
[end of InvenTree/InvenTree/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

Golden diff:
diff --git a/InvenTree/InvenTree/tasks.py b/InvenTree/InvenTree/tasks.py
--- a/InvenTree/InvenTree/tasks.py
+++ b/InvenTree/InvenTree/tasks.py
@@ -6,7 +6,8 @@
import requests
import logging
-from datetime import datetime, timedelta
+from datetime import timedelta
+from django.utils import timezone
from django.core.exceptions import AppRegistryNotReady
from django.db.utils import OperationalError, ProgrammingError
@@ -125,7 +126,7 @@
except AppRegistryNotReady:
return
- threshold = datetime.now() - timedelta(minutes=30)
+ threshold = timezone.now() - timedelta(minutes=30)
# Delete heartbeat results more than half an hour old,
# otherwise they just create extra noise
@@ -149,7 +150,7 @@
logger.info("Could not perform 'delete_successful_tasks' - App registry not ready")
return
- threshold = datetime.now() - timedelta(days=30)
+ threshold = timezone.now() - timedelta(days=30)
results = Success.objects.filter(
started__lte=threshold
| {"golden_diff": "diff --git a/InvenTree/InvenTree/tasks.py b/InvenTree/InvenTree/tasks.py\n--- a/InvenTree/InvenTree/tasks.py\n+++ b/InvenTree/InvenTree/tasks.py\n@@ -6,7 +6,8 @@\n import requests\n import logging\n \n-from datetime import datetime, timedelta\n+from datetime import timedelta\n+from django.utils import timezone\n \n from django.core.exceptions import AppRegistryNotReady\n from django.db.utils import OperationalError, ProgrammingError\n@@ -125,7 +126,7 @@\n except AppRegistryNotReady:\n return\n \n- threshold = datetime.now() - timedelta(minutes=30)\n+ threshold = timezone.now() - timedelta(minutes=30)\n \n # Delete heartbeat results more than half an hour old,\n # otherwise they just create extra noise\n@@ -149,7 +150,7 @@\n logger.info(\"Could not perform 'delete_successful_tasks' - App registry not ready\")\n return\n \n- threshold = datetime.now() - timedelta(days=30)\n+ threshold = timezone.now() - timedelta(days=30)\n \n results = Success.objects.filter(\n started__lte=threshold\n", "issue": "Heartbeat has a datetime warning\nFrom the worker logs:\r\n`inventree-worker | 00:51:15 [Q] INFO Process-1 created a task from schedule [InvenTree.tasks.heartbeat]\r\ninventree-worker | 00:51:15 [Q] INFO Process-1:3 processing [may-cat-eleven-beer]\r\ninventree-worker | Could not perform heartbeat task - App registry not ready\r\ninventree-worker | /usr/lib/python3.8/site-packages/django/db/models/fields/__init__.py:1416: RuntimeWarning: DateTimeField Task.started received a naive datetime (2021-08-10 00:21:15.816018) while time zone support is active.\r\ninventree-worker | warnings.warn(\"DateTimeField %s received a naive datetime (%s)\"\r\n`\r\n\r\nThe heartbeat task references a non timezoned date which django complains about as USE_TZ is True in the settings.py\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport re\nimport json\nimport requests\nimport logging\n\nfrom datetime import datetime, timedelta\n\nfrom django.core.exceptions import AppRegistryNotReady\nfrom django.db.utils import OperationalError, ProgrammingError\n\n\nlogger = logging.getLogger(\"inventree\")\n\n\ndef schedule_task(taskname, **kwargs):\n \"\"\"\n Create a scheduled task.\n If the task has already been scheduled, ignore!\n \"\"\"\n\n # If unspecified, repeat indefinitely\n repeats = kwargs.pop('repeats', -1)\n kwargs['repeats'] = repeats\n\n try:\n from django_q.models import Schedule\n except (AppRegistryNotReady):\n logger.info(\"Could not start background tasks - App registry not ready\")\n return\n\n try:\n # If this task is already scheduled, don't schedule it again\n # Instead, update the scheduling parameters\n if Schedule.objects.filter(func=taskname).exists():\n logger.info(f\"Scheduled task '{taskname}' already exists - updating!\")\n\n Schedule.objects.filter(func=taskname).update(**kwargs)\n else:\n logger.info(f\"Creating scheduled task '{taskname}'\")\n\n Schedule.objects.create(\n name=taskname,\n func=taskname,\n **kwargs\n )\n except (OperationalError, ProgrammingError):\n # Required if the DB is not ready yet\n pass\n\n\ndef offload_task(taskname, force_sync=False, *args, **kwargs):\n \"\"\"\n Create an AsyncTask if workers are running.\n This is different to a 'scheduled' task,\n in that it only runs once!\n\n If workers are not running or force_sync flag\n is set then the task is ran synchronously.\n \"\"\"\n\n try:\n from django_q.tasks import AsyncTask\n except (AppRegistryNotReady):\n logger.warning(\"Could not 
offload task - app registry not ready\")\n return\n import importlib\n from InvenTree.status import is_worker_running\n\n if is_worker_running() and not force_sync:\n # Running as asynchronous task\n try:\n task = AsyncTask(taskname, *args, **kwargs)\n task.run()\n except ImportError:\n logger.warning(f\"WARNING: '{taskname}' not started - Function not found\")\n else:\n # Split path\n try:\n app, mod, func = taskname.split('.')\n app_mod = app + '.' + mod\n except ValueError:\n logger.warning(f\"WARNING: '{taskname}' not started - Malformed function path\")\n return\n\n # Import module from app\n try:\n _mod = importlib.import_module(app_mod)\n except ModuleNotFoundError:\n logger.warning(f\"WARNING: '{taskname}' not started - No module named '{app_mod}'\")\n return\n\n # Retrieve function\n try:\n _func = getattr(_mod, func)\n except AttributeError:\n # getattr does not work for local import\n _func = None\n\n try:\n if not _func:\n _func = eval(func)\n except NameError:\n logger.warning(f\"WARNING: '{taskname}' not started - No function named '{func}'\")\n return\n \n # Workers are not running: run it as synchronous task\n _func()\n\n\ndef heartbeat():\n \"\"\"\n Simple task which runs at 5 minute intervals,\n so we can determine that the background worker\n is actually running.\n\n (There is probably a less \"hacky\" way of achieving this)?\n \"\"\"\n\n try:\n from django_q.models import Success\n logger.info(\"Could not perform heartbeat task - App registry not ready\")\n except AppRegistryNotReady:\n return\n\n threshold = datetime.now() - timedelta(minutes=30)\n\n # Delete heartbeat results more than half an hour old,\n # otherwise they just create extra noise\n heartbeats = Success.objects.filter(\n func='InvenTree.tasks.heartbeat',\n started__lte=threshold\n )\n\n heartbeats.delete()\n\n\ndef delete_successful_tasks():\n \"\"\"\n Delete successful task logs\n which are more than a month old.\n \"\"\"\n\n try:\n from django_q.models import Success\n except AppRegistryNotReady:\n logger.info(\"Could not perform 'delete_successful_tasks' - App registry not ready\")\n return\n\n threshold = datetime.now() - timedelta(days=30)\n\n results = Success.objects.filter(\n started__lte=threshold\n )\n\n results.delete()\n\n\ndef check_for_updates():\n \"\"\"\n Check if there is an update for InvenTree\n \"\"\"\n\n try:\n import common.models\n except AppRegistryNotReady:\n # Apps not yet loaded!\n logger.info(\"Could not perform 'check_for_updates' - App registry not ready\")\n return\n\n response = requests.get('https://api.github.com/repos/inventree/inventree/releases/latest')\n\n if not response.status_code == 200:\n raise ValueError(f'Unexpected status code from GitHub API: {response.status_code}')\n\n data = json.loads(response.text)\n\n tag = data.get('tag_name', None)\n\n if not tag:\n raise ValueError(\"'tag_name' missing from GitHub response\")\n\n match = re.match(r\"^.*(\\d+)\\.(\\d+)\\.(\\d+).*$\", tag)\n\n if not len(match.groups()) == 3:\n logger.warning(f\"Version '{tag}' did not match expected pattern\")\n return\n\n latest_version = [int(x) for x in match.groups()]\n\n if not len(latest_version) == 3:\n raise ValueError(f\"Version '{tag}' is not correct format\")\n\n logger.info(f\"Latest InvenTree version: '{tag}'\")\n\n # Save the version to the database\n common.models.InvenTreeSetting.set_setting(\n 'INVENTREE_LATEST_VERSION',\n tag,\n None\n )\n\n\ndef update_exchange_rates():\n \"\"\"\n Update currency exchange rates\n \"\"\"\n\n try:\n from InvenTree.exchange import 
InvenTreeExchange\n from djmoney.contrib.exchange.models import ExchangeBackend, Rate\n from common.settings import currency_code_default, currency_codes\n except AppRegistryNotReady:\n # Apps not yet loaded!\n logger.info(\"Could not perform 'update_exchange_rates' - App registry not ready\")\n return\n except:\n # Other error?\n return\n\n # Test to see if the database is ready yet\n try:\n backend = ExchangeBackend.objects.get(name='InvenTreeExchange')\n except ExchangeBackend.DoesNotExist:\n pass\n except:\n # Some other error\n print(\"Database not ready\")\n return\n\n backend = InvenTreeExchange()\n print(f\"Updating exchange rates from {backend.url}\")\n\n base = currency_code_default()\n\n print(f\"Using base currency '{base}'\")\n\n backend.update_rates(base_currency=base)\n\n # Remove any exchange rates which are not in the provided currencies\n Rate.objects.filter(backend=\"InvenTreeExchange\").exclude(currency__in=currency_codes()).delete()\n\n\ndef send_email(subject, body, recipients, from_email=None):\n \"\"\"\n Send an email with the specified subject and body,\n to the specified recipients list.\n \"\"\"\n\n if type(recipients) == str:\n recipients = [recipients]\n\n offload_task(\n 'django.core.mail.send_mail',\n subject, body,\n from_email,\n recipients,\n )\n", "path": "InvenTree/InvenTree/tasks.py"}]} | 3,052 | 261 |
gh_patches_debug_54113 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-348

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix readthedocs build
As discussed in 01/28/2021 SIG.
</issue>
<code>
[start of docs/conf.py]
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # http://www.sphinx-doc.org/en/master/config
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12
13 import os
14 import sys
15 from configparser import ConfigParser
16 from os import listdir
17 from os.path import isdir, join
18
19 # configure django to avoid the following exception:
20 # django.core.exceptions.ImproperlyConfigured: Requested settings, but settings
21 # are not configured. You must either define the environment variable
22 # DJANGO_SETTINGS_MODULE or call settings.configure() before accessing settings.
23 from django.conf import settings
24
25 settings.configure()
26
27 exp = "../exporter"
28 exp_dirs = [
29 os.path.abspath("/".join(["../exporter", f, "src"]))
30 for f in listdir(exp)
31 if isdir(join(exp, f))
32 ]
33
34 instr = "../instrumentation"
35 instr_dirs = [
36 os.path.abspath("/".join(["../instrumentation", f, "src"]))
37 for f in listdir(instr)
38 if isdir(join(instr, f))
39 ]
40
41 sdk_ext = "../sdk-extension"
42 sdk_ext_dirs = [
43 os.path.abspath("/".join(["../sdk-extension", f, "src"]))
44 for f in listdir(sdk_ext)
45 if isdir(join(sdk_ext, f))
46 ]
47
48 sys.path[:0] = exp_dirs + instr_dirs + sdk_ext_dirs
49
50 # -- Project information -----------------------------------------------------
51
52 project = "OpenTelemetry Python Contrib"
53 copyright = "OpenTelemetry Authors" # pylint: disable=redefined-builtin
54 author = "OpenTelemetry Authors"
55
56
57 # -- General configuration ---------------------------------------------------
58
59 # Easy automatic cross-references for `code in backticks`
60 default_role = "any"
61
62 # Add any Sphinx extension module names here, as strings. They can be
63 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
64 # ones.
65 extensions = [
66 # API doc generation
67 "sphinx.ext.autodoc",
68 # Support for google-style docstrings
69 "sphinx.ext.napoleon",
70 # Infer types from hints instead of docstrings
71 "sphinx_autodoc_typehints",
72 # Add links to source from generated docs
73 "sphinx.ext.viewcode",
74 # Link to other sphinx docs
75 "sphinx.ext.intersphinx",
76 # Add a .nojekyll file to the generated HTML docs
77 # https://help.github.com/en/articles/files-that-start-with-an-underscore-are-missing
78 "sphinx.ext.githubpages",
79 # Support external links to different versions in the Github repo
80 "sphinx.ext.extlinks",
81 ]
82
83 intersphinx_mapping = {
84 "python": ("https://docs.python.org/3/", None),
85 "opentracing": (
86 "https://opentracing-python.readthedocs.io/en/latest/",
87 None,
88 ),
89 "aiohttp": ("https://aiohttp.readthedocs.io/en/stable/", None),
90 "wrapt": ("https://wrapt.readthedocs.io/en/latest/", None),
91 "pymongo": ("https://pymongo.readthedocs.io/en/stable/", None),
92 "opentelemetry": (
93 "https://opentelemetry-python.readthedocs.io/en/latest/",
94 None,
95 ),
96 }
97
98 # http://www.sphinx-doc.org/en/master/config.html#confval-nitpicky
99 # Sphinx will warn about all references where the target cannot be found.
100 nitpicky = True
101 # Sphinx does not recognize generic type TypeVars
102 # Container supposedly were fixed, but does not work
103 # https://github.com/sphinx-doc/sphinx/pull/3744
104 nitpick_ignore = []
105
106 cfg = ConfigParser()
107 cfg.read("./nitpick-exceptions.ini")
108 mcfg = cfg["default"]
109
110
111 def getlistcfg(strval):
112 return [
113 val.strip()
114 for line in strval.split("\n")
115 for val in line.split(",")
116 if val.strip()
117 ]
118
119
120 if "class_references" in mcfg:
121 class_references = getlistcfg(mcfg["class_references"])
122 for class_reference in class_references:
123 nitpick_ignore.append(("py:class", class_reference,))
124
125 if "anys" in mcfg:
126 anys = getlistcfg(mcfg["anys"])
127 for any in anys:
128 nitpick_ignore.append(("any", any,))
129
130 # Add any paths that contain templates here, relative to this directory.
131 templates_path = ["_templates"]
132
133 # List of patterns, relative to source directory, that match files and
134 # directories to ignore when looking for source files.
135 # This pattern also affects html_static_path and html_extra_path.
136 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
137
138 autodoc_default_options = {
139 "members": True,
140 "undoc-members": True,
141 "show-inheritance": True,
142 "member-order": "bysource",
143 }
144
145 # -- Options for HTML output -------------------------------------------------
146
147 # The theme to use for HTML and HTML Help pages. See the documentation for
148 # a list of builtin themes.
149 #
150 html_theme = "sphinx_rtd_theme"
151
152 # Add any paths that contain custom static files (such as style sheets) here,
153 # relative to this directory. They are copied after the builtin static files,
154 # so a file named "default.css" will overwrite the builtin "default.css".
155 html_static_path = []
156
157 # Support external links to specific versions of the files in the Github repo
158 branch = os.environ.get("READTHEDOCS_VERSION")
159 if branch is None or branch == "latest":
160 branch = "main"
161
162 REPO = "open-telemetry/opentelemetry-python-contrib/"
163 scm_raw_web = "https://raw.githubusercontent.com/" + REPO + branch
164 scm_web = "https://github.com/" + REPO + "blob/" + branch
165
166 # Store variables in the epilogue so they are globally available.
167 rst_epilog = """
168 .. |SCM_WEB| replace:: {s}
169 .. |SCM_RAW_WEB| replace:: {sr}
170 .. |SCM_BRANCH| replace:: {b}
171 """.format(
172 s=scm_web, sr=scm_raw_web, b=branch
173 )
174
175 # used to have links to repo files
176 extlinks = {
177 "scm_raw_web": (scm_raw_web + "/%s", "scm_raw_web"),
178 "scm_web": (scm_web + "/%s", "scm_web"),
179 }
180
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

Golden diff:
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -124,8 +124,8 @@
if "anys" in mcfg:
anys = getlistcfg(mcfg["anys"])
- for any in anys:
- nitpick_ignore.append(("any", any,))
+ for _any in anys:
+ nitpick_ignore.append(("any", _any,))
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
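
Note on the fix above: the only change is renaming the loop variable from `any` to `_any`. Binding `any` in the loop shadows the Python builtin inside `conf.py`, which is the kind of thing a strict docs/lint build rejects; the rename keeps the nitpick-exception handling identical. A standalone illustration of the shadowing problem (example data, not taken from the repo):

```python
anys = ["py:class", "py:meth"]
for any in anys:   # rebinds the builtin name `any` to a str
    pass

# Later code expecting the builtin now fails:
any(s.startswith("py:") for s in anys)   # TypeError: 'str' object is not callable
```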
gh_patches_debug_2193 | rasdani/github-patches | git_diff | ansible-collections__community.general-6695

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
read_csv - Key 'Name' was not found in the CSV header fields
##### SUMMARY
The `read_csv` module fails to identify a field, yet displays the field in the list of available fields.
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
read_csv
##### ANSIBLE VERSION
```
ansible 2.9.10
config file = /home/anton/git/ansible-deploy-vmware-vm/ansible.cfg
configured module search path = ['/home/anton/git/ansible-deploy-vmware-vm/library']
ansible python module location = /home/anton/.local/lib/python3.6/site-packages/ansible
executable location = /home/anton/.local/bin/ansible
python version = 3.6.9 (default, Apr 18 2020, 01:56:04) [GCC 8.4.0]
```
##### CONFIGURATION
```
# config file for ansible -- http://ansible.com/
# ==============================================
# nearly all parameters can be overridden in ansible-playbook
# or with command line flags. ansible will read ANSIBLE_CONFIG,
# ansible.cfg in the current working directory, .ansible.cfg in
# the home directory or /etc/ansible/ansible.cfg, whichever it
# finds first
[defaults]
host_key_checking = False
host_key_check = False
ansible_python_interpreter=/usr/bin/python3
log_path = ./ansible.log
#bin_ansible_callbacks=True
#stdout_callback = debug
# some basic default values...
library = ./library
# additional paths to search for roles in, colon separated
roles_path = ./roles
[ssh_connection]
# ssh arguments to use
ssh_args = -o StrictHostKeyChecking=no
timeout=60
```
##### OS / ENVIRONMENT
Ubuntu 20:04
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
<!--- Paste example playbooks or commands between quotes below -->
```
---
- name: Right-size VMs
gather_facts: false
hosts: all
connection: local
tasks:
# Read a CSV file and access the first item
- name: Read users from CSV file and return a list
read_csv:
path: "files/vms/6-19-20 Optimization Report - Oversized Virtual Machines Prod2.csv"
key: Name
register: users
- debug:
msg: 'User {{ users.list.2.Name}}'
# msg: 'User {{ users.list.2.Name}} has UID {{ users.list.2.ReclaimablevCPUs}} and GID {{ users.list.2.ReclaimableMemory}}'
# msg: "{{ users }}"
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
Expect to be able to read CSV values by column name (field), as described in the module documentation.
##### ACTUAL RESULTS
```
fatal: [localhost]: FAILED! => {"ansible_facts": {"discovered_interpreter_python": "/usr/bin/python"}, "changed": false, "msg": "Key 'Name' was not found in the CSV header fields: Name, Configured-vCPU, ReclaimablevCPUs, ConfiguredMemory, ReclaimableMemory, ParentvCenter"}
```
</issue>
<code>
[start of plugins/module_utils/csv.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright (c) 2021, Andrew Pantuso (@ajpantuso) <[email protected]>
4 # Copyright (c) 2018, Dag Wieers (@dagwieers) <[email protected]>
5 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
6 # SPDX-License-Identifier: GPL-3.0-or-later
7
8 from __future__ import absolute_import, division, print_function
9 __metaclass__ = type
10
11 import csv
12 from io import BytesIO, StringIO
13
14 from ansible.module_utils.common.text.converters import to_native
15 from ansible.module_utils.six import PY3
16
17
18 class CustomDialectFailureError(Exception):
19 pass
20
21
22 class DialectNotAvailableError(Exception):
23 pass
24
25
26 CSVError = csv.Error
27
28
29 def initialize_dialect(dialect, **kwargs):
30 # Add Unix dialect from Python 3
31 class unix_dialect(csv.Dialect):
32 """Describe the usual properties of Unix-generated CSV files."""
33 delimiter = ','
34 quotechar = '"'
35 doublequote = True
36 skipinitialspace = False
37 lineterminator = '\n'
38 quoting = csv.QUOTE_ALL
39
40 csv.register_dialect("unix", unix_dialect)
41
42 if dialect not in csv.list_dialects():
43 raise DialectNotAvailableError("Dialect '%s' is not supported by your version of python." % dialect)
44
45 # Create a dictionary from only set options
46 dialect_params = dict((k, v) for k, v in kwargs.items() if v is not None)
47 if dialect_params:
48 try:
49 csv.register_dialect('custom', dialect, **dialect_params)
50 except TypeError as e:
51 raise CustomDialectFailureError("Unable to create custom dialect: %s" % to_native(e))
52 dialect = 'custom'
53
54 return dialect
55
56
57 def read_csv(data, dialect, fieldnames=None):
58
59 data = to_native(data, errors='surrogate_or_strict')
60
61 if PY3:
62 fake_fh = StringIO(data)
63 else:
64 fake_fh = BytesIO(data)
65
66 reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect)
67
68 return reader
69
[end of plugins/module_utils/csv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/module_utils/csv.py b/plugins/module_utils/csv.py
--- a/plugins/module_utils/csv.py
+++ b/plugins/module_utils/csv.py
@@ -55,8 +55,10 @@
def read_csv(data, dialect, fieldnames=None):
-
+ BOM = to_native(u'\ufeff')
data = to_native(data, errors='surrogate_or_strict')
+ if data.startswith(BOM):
+ data = data[len(BOM):]
if PY3:
fake_fh = StringIO(data)
| {"golden_diff": "diff --git a/plugins/module_utils/csv.py b/plugins/module_utils/csv.py\n--- a/plugins/module_utils/csv.py\n+++ b/plugins/module_utils/csv.py\n@@ -55,8 +55,10 @@\n \n \n def read_csv(data, dialect, fieldnames=None):\n-\n+ BOM = to_native(u'\\ufeff')\n data = to_native(data, errors='surrogate_or_strict')\n+ if data.startswith(BOM):\n+ data = data[len(BOM):]\n \n if PY3:\n fake_fh = StringIO(data)\n", "issue": "read_csv - Key 'Name' was not found in the CSV header fields\n##### SUMMARY\r\nThe `read_csv` module fails to identify a field, yet displaces the field in the list of available fields.\r\n\r\n##### ISSUE TYPE\r\n- Bug Report\r\n\r\n##### COMPONENT NAME\r\nread_csv\r\n\r\n##### ANSIBLE VERSION\r\n\r\n```\r\nansible 2.9.10\r\n config file = /home/anton/git/ansible-deploy-vmware-vm/ansible.cfg\r\n configured module search path = ['/home/anton/git/ansible-deploy-vmware-vm/library']\r\n ansible python module location = /home/anton/.local/lib/python3.6/site-packages/ansible\r\n executable location = /home/anton/.local/bin/ansible\r\n python version = 3.6.9 (default, Apr 18 2020, 01:56:04) [GCC 8.4.0]\r\n\r\n```\r\n\r\n##### CONFIGURATION\r\n\r\n```\r\n# config file for ansible -- http://ansible.com/\r\n# ==============================================\r\n\r\n# nearly all parameters can be overridden in ansible-playbook\r\n# or with command line flags. ansible will read ANSIBLE_CONFIG,\r\n# ansible.cfg in the current working directory, .ansible.cfg in\r\n# the home directory or /etc/ansible/ansible.cfg, whichever it\r\n# finds first\r\n\r\n[defaults]\r\nhost_key_checking = False\r\nhost_key_check = False\r\nansible_python_interpreter=/usr/bin/python3\r\nlog_path = ./ansible.log\r\n#bin_ansible_callbacks=True\r\n#stdout_callback = debug\r\n\r\n\r\n# some basic default values...\r\nlibrary = ./library\r\n\r\n# additional paths to search for roles in, colon separated\r\nroles_path = ./roles\r\n\r\n[ssh_connection]\r\n# ssh arguments to use\r\nssh_args = -o StrictHostKeyChecking=no\r\ntimeout=60\r\n\r\n\r\n```\r\n\r\n##### OS / ENVIRONMENT\r\nUbuntu 20:04\r\n\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```\r\n---\r\n- name: Right-size VMs\r\n gather_facts: false\r\n hosts: all\r\n connection: local\r\n tasks:\r\n # Read a CSV file and access the first item\r\n - name: Read users from CSV file and return a list\r\n read_csv:\r\n path: \"files/vms/6-19-20 Optimization Report - Oversized Virtual Machines Prod2.csv\"\r\n key: Name\r\n register: users\r\n\r\n - debug:\r\n msg: 'User {{ users.list.2.Name}}'\r\n # msg: 'User {{ users.list.2.Name}} has UID {{ users.list.2.ReclaimablevCPUs}} and GID {{ users.list.2.ReclaimableMemory}}'\r\n # msg: \"{{ users }}\"\r\n\r\n\r\n\r\n```\r\n\r\n<!--- HINT: You can paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\nExpect to be able to read CSV values by col name (field) as based on module documentation.\r\n\r\n\r\n##### ACTUAL RESULTS\r\n```\r\nfatal: [localhost]: FAILED! 
=> {\"ansible_facts\": {\"discovered_interpreter_python\": \"/usr/bin/python\"}, \"changed\": false, \"msg\": \"Key 'Name' was not found in the CSV header fields: \ufeffName, Configured-vCPU, ReclaimablevCPUs, ConfiguredMemory, ReclaimableMemory, ParentvCenter\"}\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2021, Andrew Pantuso (@ajpantuso) <[email protected]>\n# Copyright (c) 2018, Dag Wieers (@dagwieers) <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nimport csv\nfrom io import BytesIO, StringIO\n\nfrom ansible.module_utils.common.text.converters import to_native\nfrom ansible.module_utils.six import PY3\n\n\nclass CustomDialectFailureError(Exception):\n pass\n\n\nclass DialectNotAvailableError(Exception):\n pass\n\n\nCSVError = csv.Error\n\n\ndef initialize_dialect(dialect, **kwargs):\n # Add Unix dialect from Python 3\n class unix_dialect(csv.Dialect):\n \"\"\"Describe the usual properties of Unix-generated CSV files.\"\"\"\n delimiter = ','\n quotechar = '\"'\n doublequote = True\n skipinitialspace = False\n lineterminator = '\\n'\n quoting = csv.QUOTE_ALL\n\n csv.register_dialect(\"unix\", unix_dialect)\n\n if dialect not in csv.list_dialects():\n raise DialectNotAvailableError(\"Dialect '%s' is not supported by your version of python.\" % dialect)\n\n # Create a dictionary from only set options\n dialect_params = dict((k, v) for k, v in kwargs.items() if v is not None)\n if dialect_params:\n try:\n csv.register_dialect('custom', dialect, **dialect_params)\n except TypeError as e:\n raise CustomDialectFailureError(\"Unable to create custom dialect: %s\" % to_native(e))\n dialect = 'custom'\n\n return dialect\n\n\ndef read_csv(data, dialect, fieldnames=None):\n\n data = to_native(data, errors='surrogate_or_strict')\n\n if PY3:\n fake_fh = StringIO(data)\n else:\n fake_fh = BytesIO(data)\n\n reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect)\n\n return reader\n", "path": "plugins/module_utils/csv.py"}]} | 1,907 | 116 |
gh_patches_debug_11630 | rasdani/github-patches | git_diff | mozilla__bugbug-407 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Not all training tasks need commits DB
Indeed I think none of the ones we currently run as part of the data pipeline need the commits.
We should:
- Make the trainer script only download the DBs which are necessary;
- Remove the dependency on the commit retrieval task in the data-pipeline.yml.
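For illustration only, a rough sketch of what "only download the DBs which are necessary" could look like for `scripts/trainer.py` (shown below). The model-to-database mapping here is an assumption made for the example — per the issue, none of the current training tasks would list `commits`:
```python
from urllib.request import urlretrieve

BASE_URL = "https://index.taskcluster.net/v1/task/project.relman.bugbug.data_{}.latest/artifacts/public"

# Hypothetical mapping of model name -> databases it needs (illustrative only).
REQUIRED_DBS = {
    "defect": ["bugs"],
    "component": ["bugs"],
    "regression": ["bugs"],
    "tracking": ["bugs"],
}

def download_required_dbs(model, decompress_file):
    """Fetch and decompress only the databases the chosen model needs."""
    for db_name in REQUIRED_DBS[model]:
        url = BASE_URL.format(db_name)
        urlretrieve(f"{url}/{db_name}.json.xz", f"data/{db_name}.json.xz")
        decompress_file(f"data/{db_name}.json")
```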
</issue>
<code>
[start of scripts/trainer.py]
1 # -*- coding: utf-8 -*-
2
3 import argparse
4 import lzma
5 import os
6 import shutil
7 from logging import INFO, basicConfig, getLogger
8 from urllib.request import urlretrieve
9
10 from bugbug.models.component import ComponentModel
11 from bugbug.models.defect_enhancement_task import DefectEnhancementTaskModel
12 from bugbug.models.regression import RegressionModel
13 from bugbug.models.tracking import TrackingModel
14
15 basicConfig(level=INFO)
16 logger = getLogger(__name__)
17
18 BASE_URL = "https://index.taskcluster.net/v1/task/project.relman.bugbug.data_{}.latest/artifacts/public"
19
20
21 class Trainer(object):
22 def decompress_file(self, path):
23 with lzma.open(f"{path}.xz", "rb") as input_f:
24 with open(path, "wb") as output_f:
25 shutil.copyfileobj(input_f, output_f)
26
27 def compress_file(self, path):
28 with open(path, "rb") as input_f:
29 with lzma.open(f"{path}.xz", "wb") as output_f:
30 shutil.copyfileobj(input_f, output_f)
31
32 def train_defect_enhancement_task(self):
33 logger.info("Training *defect vs enhancement vs task* model")
34 model = DefectEnhancementTaskModel()
35 model.train()
36 self.compress_file("defectenhancementtaskmodel")
37
38 def train_component(self):
39 logger.info("Training *component* model")
40 model = ComponentModel()
41 model.train()
42 self.compress_file("componentmodel")
43
44 def train_regression(self):
45 logger.info("Training *regression vs non-regression* model")
46 model = RegressionModel()
47 model.train()
48 self.compress_file("regressionmodel")
49
50 def train_tracking(self):
51 logger.info("Training *tracking* model")
52 model = TrackingModel()
53 model.train()
54 self.compress_file("trackingmodel")
55
56 def go(self, model):
57 # TODO: Stop hard-coding them
58 valid_models = ["defect", "component", "regression", "tracking"]
59
60 if model not in valid_models:
61 exception = (
62 f"Invalid model {model!r} name, use one of {valid_models!r} instead"
63 )
64 raise ValueError(exception)
65
66 # Download datasets that were built by bugbug_data.
67 os.makedirs("data", exist_ok=True)
68
69 # Bugs.json
70 logger.info("Downloading bugs database")
71 bugs_url = BASE_URL.format("bugs")
72 urlretrieve(f"{bugs_url}/bugs.json.xz", "data/bugs.json.xz")
73 logger.info("Decompressing bugs database")
74 self.decompress_file("data/bugs.json")
75
76 # Commits.json
77 logger.info("Downloading commits database")
78 commits_url = BASE_URL.format("commits")
79 urlretrieve(f"{commits_url}/commits.json.xz", "data/commits.json.xz")
80 logger.info("Decompressing commits database")
81 self.decompress_file("data/commits.json")
82
83 if model == "defect":
84 # Train classifier for defect-vs-enhancement-vs-task.
85 self.train_defect_enhancement_task()
86 elif model == "component":
87 # Train classifier for the component of a bug.
88 self.train_component()
89 elif model == "regression":
90 # Train classifier for regression-vs-nonregression.
91 self.train_regression()
92 elif model == "tracking":
93 # Train classifier for tracking bugs.
94 self.train_tracking()
95 else:
96 # We shouldn't be here
97 raise Exception("valid_models is likely not up-to-date anymore")
98
99
100 def main():
101 description = "Train the models"
102 parser = argparse.ArgumentParser(description=description)
103
104 parser.add_argument("model", help="Which model to train.")
105
106 args = parser.parse_args()
107
108 retriever = Trainer()
109 retriever.go(args.model)
110
[end of scripts/trainer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/trainer.py b/scripts/trainer.py
--- a/scripts/trainer.py
+++ b/scripts/trainer.py
@@ -73,13 +73,6 @@
logger.info("Decompressing bugs database")
self.decompress_file("data/bugs.json")
- # Commits.json
- logger.info("Downloading commits database")
- commits_url = BASE_URL.format("commits")
- urlretrieve(f"{commits_url}/commits.json.xz", "data/commits.json.xz")
- logger.info("Decompressing commits database")
- self.decompress_file("data/commits.json")
-
if model == "defect":
# Train classifier for defect-vs-enhancement-vs-task.
self.train_defect_enhancement_task()
| {"golden_diff": "diff --git a/scripts/trainer.py b/scripts/trainer.py\n--- a/scripts/trainer.py\n+++ b/scripts/trainer.py\n@@ -73,13 +73,6 @@\n logger.info(\"Decompressing bugs database\")\n self.decompress_file(\"data/bugs.json\")\n \n- # Commits.json\n- logger.info(\"Downloading commits database\")\n- commits_url = BASE_URL.format(\"commits\")\n- urlretrieve(f\"{commits_url}/commits.json.xz\", \"data/commits.json.xz\")\n- logger.info(\"Decompressing commits database\")\n- self.decompress_file(\"data/commits.json\")\n-\n if model == \"defect\":\n # Train classifier for defect-vs-enhancement-vs-task.\n self.train_defect_enhancement_task()\n", "issue": "Not all training tasks need commits DB\nIndeed I think none of the ones we currently run as part of the data pipeline need the commits.\r\nWe should:\r\n- Make the trainer script only download the DBs which are necessary;\r\n- Remove the dependency on the commit retrieval task in the data-pipeline.yml.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport lzma\nimport os\nimport shutil\nfrom logging import INFO, basicConfig, getLogger\nfrom urllib.request import urlretrieve\n\nfrom bugbug.models.component import ComponentModel\nfrom bugbug.models.defect_enhancement_task import DefectEnhancementTaskModel\nfrom bugbug.models.regression import RegressionModel\nfrom bugbug.models.tracking import TrackingModel\n\nbasicConfig(level=INFO)\nlogger = getLogger(__name__)\n\nBASE_URL = \"https://index.taskcluster.net/v1/task/project.relman.bugbug.data_{}.latest/artifacts/public\"\n\n\nclass Trainer(object):\n def decompress_file(self, path):\n with lzma.open(f\"{path}.xz\", \"rb\") as input_f:\n with open(path, \"wb\") as output_f:\n shutil.copyfileobj(input_f, output_f)\n\n def compress_file(self, path):\n with open(path, \"rb\") as input_f:\n with lzma.open(f\"{path}.xz\", \"wb\") as output_f:\n shutil.copyfileobj(input_f, output_f)\n\n def train_defect_enhancement_task(self):\n logger.info(\"Training *defect vs enhancement vs task* model\")\n model = DefectEnhancementTaskModel()\n model.train()\n self.compress_file(\"defectenhancementtaskmodel\")\n\n def train_component(self):\n logger.info(\"Training *component* model\")\n model = ComponentModel()\n model.train()\n self.compress_file(\"componentmodel\")\n\n def train_regression(self):\n logger.info(\"Training *regression vs non-regression* model\")\n model = RegressionModel()\n model.train()\n self.compress_file(\"regressionmodel\")\n\n def train_tracking(self):\n logger.info(\"Training *tracking* model\")\n model = TrackingModel()\n model.train()\n self.compress_file(\"trackingmodel\")\n\n def go(self, model):\n # TODO: Stop hard-coding them\n valid_models = [\"defect\", \"component\", \"regression\", \"tracking\"]\n\n if model not in valid_models:\n exception = (\n f\"Invalid model {model!r} name, use one of {valid_models!r} instead\"\n )\n raise ValueError(exception)\n\n # Download datasets that were built by bugbug_data.\n os.makedirs(\"data\", exist_ok=True)\n\n # Bugs.json\n logger.info(\"Downloading bugs database\")\n bugs_url = BASE_URL.format(\"bugs\")\n urlretrieve(f\"{bugs_url}/bugs.json.xz\", \"data/bugs.json.xz\")\n logger.info(\"Decompressing bugs database\")\n self.decompress_file(\"data/bugs.json\")\n\n # Commits.json\n logger.info(\"Downloading commits database\")\n commits_url = BASE_URL.format(\"commits\")\n urlretrieve(f\"{commits_url}/commits.json.xz\", \"data/commits.json.xz\")\n logger.info(\"Decompressing commits database\")\n 
self.decompress_file(\"data/commits.json\")\n\n if model == \"defect\":\n # Train classifier for defect-vs-enhancement-vs-task.\n self.train_defect_enhancement_task()\n elif model == \"component\":\n # Train classifier for the component of a bug.\n self.train_component()\n elif model == \"regression\":\n # Train classifier for regression-vs-nonregression.\n self.train_regression()\n elif model == \"tracking\":\n # Train classifier for tracking bugs.\n self.train_tracking()\n else:\n # We shouldn't be here\n raise Exception(\"valid_models is likely not up-to-date anymore\")\n\n\ndef main():\n description = \"Train the models\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\"model\", help=\"Which model to train.\")\n\n args = parser.parse_args()\n\n retriever = Trainer()\n retriever.go(args.model)\n", "path": "scripts/trainer.py"}]} | 1,638 | 173 |
gh_patches_debug_10182 | rasdani/github-patches | git_diff | getredash__redash-998 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Embed query description appearing larger than visualization name
The query description is appearing larger than the visualization name:

</issue>
<code>
[start of redash/handlers/embed.py]
1 import json
2
3 from funcy import project
4 from flask import render_template, request
5 from flask_login import login_required, current_user
6 from flask_restful import abort
7
8 from redash import models, settings
9 from redash import serializers
10 from redash.utils import json_dumps
11 from redash.handlers import routes
12 from redash.handlers.base import org_scoped_rule
13 from redash.permissions import require_access, view_only
14 from authentication import current_org
15
16
17 @routes.route(org_scoped_rule('/embed/query/<query_id>/visualization/<visualization_id>'), methods=['GET'])
18 @login_required
19 def embed(query_id, visualization_id, org_slug=None):
20 # TODO: add event for embed access
21 query = models.Query.get_by_id_and_org(query_id, current_org)
22 require_access(query.groups, current_user, view_only)
23 vis = query.visualizations.where(models.Visualization.id == visualization_id).first()
24 qr = {}
25
26 if vis is not None:
27 vis = vis.to_dict()
28 qr = query.latest_query_data
29 if qr is None:
30 abort(400, message="No Results for this query")
31 else:
32 qr = qr.to_dict()
33 else:
34 abort(404, message="Visualization not found.")
35
36 client_config = {}
37 client_config.update(settings.COMMON_CLIENT_CONFIG)
38
39 qr = project(qr, ('data', 'id', 'retrieved_at'))
40 vis = project(vis, ('description', 'name', 'id', 'options', 'query', 'type', 'updated_at'))
41 vis['query'] = project(vis['query'], ('created_at', 'description', 'name', 'id', 'latest_query_data_id', 'name', 'updated_at'))
42
43 return render_template("embed.html",
44
45 client_config=json_dumps(client_config),
46 visualization=json_dumps(vis),
47 query_result=json_dumps(qr))
48
49
50 @routes.route(org_scoped_rule('/public/dashboards/<token>'), methods=['GET'])
51 @login_required
52 def public_dashboard(token, org_slug=None):
53 # TODO: verify object is a dashboard?
54 if not isinstance(current_user, models.ApiUser):
55 api_key = models.ApiKey.get_by_api_key(token)
56 dashboard = api_key.object
57 else:
58 dashboard = current_user.object
59
60 user = {
61 'permissions': [],
62 'apiKey': current_user.id
63 }
64
65 headers = {
66 'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate'
67 }
68
69 response = render_template("public.html",
70 headless='embed' in request.args,
71 user=json.dumps(user),
72 seed_data=json_dumps({
73 'dashboard': serializers.public_dashboard(dashboard)
74 }),
75 client_config=json.dumps(settings.COMMON_CLIENT_CONFIG))
76
77 return response, 200, headers
78
[end of redash/handlers/embed.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redash/handlers/embed.py b/redash/handlers/embed.py
--- a/redash/handlers/embed.py
+++ b/redash/handlers/embed.py
@@ -41,7 +41,6 @@
vis['query'] = project(vis['query'], ('created_at', 'description', 'name', 'id', 'latest_query_data_id', 'name', 'updated_at'))
return render_template("embed.html",
-
client_config=json_dumps(client_config),
visualization=json_dumps(vis),
query_result=json_dumps(qr))
| {"golden_diff": "diff --git a/redash/handlers/embed.py b/redash/handlers/embed.py\n--- a/redash/handlers/embed.py\n+++ b/redash/handlers/embed.py\n@@ -41,7 +41,6 @@\n vis['query'] = project(vis['query'], ('created_at', 'description', 'name', 'id', 'latest_query_data_id', 'name', 'updated_at'))\n \n return render_template(\"embed.html\",\n-\n client_config=json_dumps(client_config),\n visualization=json_dumps(vis),\n query_result=json_dumps(qr))\n", "issue": "Embed query description appearing larger than vizualization name\nThe query description is appearing larger then the visualization name:\n\n\n\n", "before_files": [{"content": "import json\n\nfrom funcy import project\nfrom flask import render_template, request\nfrom flask_login import login_required, current_user\nfrom flask_restful import abort\n\nfrom redash import models, settings\nfrom redash import serializers\nfrom redash.utils import json_dumps\nfrom redash.handlers import routes\nfrom redash.handlers.base import org_scoped_rule\nfrom redash.permissions import require_access, view_only\nfrom authentication import current_org\n\n\[email protected](org_scoped_rule('/embed/query/<query_id>/visualization/<visualization_id>'), methods=['GET'])\n@login_required\ndef embed(query_id, visualization_id, org_slug=None):\n # TODO: add event for embed access\n query = models.Query.get_by_id_and_org(query_id, current_org)\n require_access(query.groups, current_user, view_only)\n vis = query.visualizations.where(models.Visualization.id == visualization_id).first()\n qr = {}\n\n if vis is not None:\n vis = vis.to_dict()\n qr = query.latest_query_data\n if qr is None:\n abort(400, message=\"No Results for this query\")\n else:\n qr = qr.to_dict()\n else:\n abort(404, message=\"Visualization not found.\")\n\n client_config = {}\n client_config.update(settings.COMMON_CLIENT_CONFIG)\n\n qr = project(qr, ('data', 'id', 'retrieved_at'))\n vis = project(vis, ('description', 'name', 'id', 'options', 'query', 'type', 'updated_at'))\n vis['query'] = project(vis['query'], ('created_at', 'description', 'name', 'id', 'latest_query_data_id', 'name', 'updated_at'))\n\n return render_template(\"embed.html\",\n\n client_config=json_dumps(client_config),\n visualization=json_dumps(vis),\n query_result=json_dumps(qr))\n\n\[email protected](org_scoped_rule('/public/dashboards/<token>'), methods=['GET'])\n@login_required\ndef public_dashboard(token, org_slug=None):\n # TODO: verify object is a dashboard?\n if not isinstance(current_user, models.ApiUser):\n api_key = models.ApiKey.get_by_api_key(token)\n dashboard = api_key.object\n else:\n dashboard = current_user.object\n\n user = {\n 'permissions': [],\n 'apiKey': current_user.id\n }\n\n headers = {\n 'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate'\n }\n\n response = render_template(\"public.html\",\n headless='embed' in request.args,\n user=json.dumps(user),\n seed_data=json_dumps({\n 'dashboard': serializers.public_dashboard(dashboard)\n }),\n client_config=json.dumps(settings.COMMON_CLIENT_CONFIG))\n\n return response, 200, headers\n", "path": "redash/handlers/embed.py"}]} | 1,375 | 124 |
gh_patches_debug_12620 | rasdani/github-patches | git_diff | kivy__kivy-5187 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Kivy breaks Clipboard
### Versions
* Python: 2.7.12
* OS: Windows 10
* Kivy: 1.9.2-dev0
* Kivy installation method: wheel
### Description
When pasting some data into a `TextInput`, the clipboard breaks across the system, and copying and pasting is not possible until the Kivy app is terminated. Specifically, I found the following steps to reproduce the problem:
1. Try copying a file into the `TextInput` box (nothing will paste in as expected)
2. Try copying some text somewhere else (does not have to be in the `TextInput`)
After step 1, nothing is copied or pasted and the Kivy application must be terminated before the clipboard starts working again.
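A likely mechanism, judging from `kivy/core/clipboard/clipboard_winctypes.py` below: when a file rather than text is on the clipboard, `GetClipboardData(CF_UNICODETEXT)` returns a null handle and `get()` returns early without ever calling `CloseClipboard()`, leaving the clipboard locked by the Kivy window. A hedged sketch of one way to guard that early return, reusing the module's ctypes names:
```python
def get(self, mimetype='text/plain'):
    user32.OpenClipboard(user32.GetActiveWindow())
    pcontents = user32.GetClipboardData(13)  # 13 == CF_UNICODETEXT

    if not pcontents:
        # A file (or anything that is not text) was copied, so there is no
        # CF_UNICODETEXT handle. The clipboard must still be closed here,
        # otherwise it stays locked system-wide until the app exits.
        user32.CloseClipboard()
        return ''

    data = c_wchar_p(pcontents).value.encode(self._encoding)
    user32.CloseClipboard()
    return data
```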
</issue>
<code>
[start of kivy/core/clipboard/clipboard_winctypes.py]
1 '''
2 Clipboard windows: an implementation of the Clipboard using ctypes.
3 '''
4
5 __all__ = ('ClipboardWindows', )
6
7 from kivy.utils import platform
8 from kivy.core.clipboard import ClipboardBase
9
10 if platform != 'win':
11 raise SystemError('unsupported platform for Windows clipboard')
12
13 import ctypes
14 from ctypes import wintypes
15 user32 = ctypes.windll.user32
16 kernel32 = ctypes.windll.kernel32
17 msvcrt = ctypes.cdll.msvcrt
18 c_char_p = ctypes.c_char_p
19 c_wchar_p = ctypes.c_wchar_p
20
21
22 class ClipboardWindows(ClipboardBase):
23
24 def get(self, mimetype='text/plain'):
25 GetClipboardData = user32.GetClipboardData
26 GetClipboardData.argtypes = [wintypes.UINT]
27 GetClipboardData.restype = wintypes.HANDLE
28
29 user32.OpenClipboard(user32.GetActiveWindow())
30 # 1 is CF_TEXT
31 pcontents = GetClipboardData(13)
32 if not pcontents:
33 return ''
34 data = c_wchar_p(pcontents).value.encode(self._encoding)
35 user32.CloseClipboard()
36 return data
37
38 def put(self, text, mimetype='text/plain'):
39 text = text.decode(self._encoding) # auto converted later
40 text += u'\x00'
41
42 SetClipboardData = user32.SetClipboardData
43 SetClipboardData.argtypes = [wintypes.UINT, wintypes.HANDLE]
44 SetClipboardData.restype = wintypes.HANDLE
45
46 GlobalAlloc = kernel32.GlobalAlloc
47 GlobalAlloc.argtypes = [wintypes.UINT, ctypes.c_size_t]
48 GlobalAlloc.restype = wintypes.HGLOBAL
49
50 CF_UNICODETEXT = 13
51
52 user32.OpenClipboard(user32.GetActiveWindow())
53 user32.EmptyClipboard()
54 hCd = GlobalAlloc(0, len(text) * ctypes.sizeof(ctypes.c_wchar))
55 msvcrt.wcscpy_s(c_wchar_p(hCd), len(text), c_wchar_p(text))
56 SetClipboardData(CF_UNICODETEXT, hCd)
57 user32.CloseClipboard()
58
59 def get_types(self):
60 return ['text/plain']
61
[end of kivy/core/clipboard/clipboard_winctypes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kivy/core/clipboard/clipboard_winctypes.py b/kivy/core/clipboard/clipboard_winctypes.py
--- a/kivy/core/clipboard/clipboard_winctypes.py
+++ b/kivy/core/clipboard/clipboard_winctypes.py
@@ -27,9 +27,13 @@
GetClipboardData.restype = wintypes.HANDLE
user32.OpenClipboard(user32.GetActiveWindow())
- # 1 is CF_TEXT
+ # Standard Clipboard Format "1" is "CF_TEXT"
pcontents = GetClipboardData(13)
+
+ # if someone pastes a FILE, the content is None for SCF 13
+ # and the clipboard is locked if not closed properly
if not pcontents:
+ user32.CloseClipboard()
return ''
data = c_wchar_p(pcontents).value.encode(self._encoding)
user32.CloseClipboard()
| {"golden_diff": "diff --git a/kivy/core/clipboard/clipboard_winctypes.py b/kivy/core/clipboard/clipboard_winctypes.py\n--- a/kivy/core/clipboard/clipboard_winctypes.py\n+++ b/kivy/core/clipboard/clipboard_winctypes.py\n@@ -27,9 +27,13 @@\n GetClipboardData.restype = wintypes.HANDLE\n \n user32.OpenClipboard(user32.GetActiveWindow())\n- # 1 is CF_TEXT\n+ # Standard Clipboard Format \"1\" is \"CF_TEXT\"\n pcontents = GetClipboardData(13)\n+\n+ # if someone pastes a FILE, the content is None for SCF 13\n+ # and the clipboard is locked if not closed properly\n if not pcontents:\n+ user32.CloseClipboard()\n return ''\n data = c_wchar_p(pcontents).value.encode(self._encoding)\n user32.CloseClipboard()\n", "issue": "Kivy breaks Clipboard\n### Versions\r\n\r\n* Python: 2.7.12\r\n* OS: Windows 10\r\n* Kivy: 1.9.2-dev0\r\n* Kivy installation method: wheel\r\n\r\n### Description\r\n\r\nWhen pasting some data into a `TextInput`, the clipboard breaks across the system, and copying and pasting is not possible until the Kivy app is terminated. Specifically, I found the following steps to reproduce the problem:\r\n1. Try copying a file into the `TextInput` box (nothing will paste in as expected)\r\n2. Try copying some text somewhere else (does not have to be in the `TextInput`)\r\n\r\nAfter step 1, nothing is copied or pasted and the Kivy application must be terminated before the clipboard starts working again.\n", "before_files": [{"content": "'''\nClipboard windows: an implementation of the Clipboard using ctypes.\n'''\n\n__all__ = ('ClipboardWindows', )\n\nfrom kivy.utils import platform\nfrom kivy.core.clipboard import ClipboardBase\n\nif platform != 'win':\n raise SystemError('unsupported platform for Windows clipboard')\n\nimport ctypes\nfrom ctypes import wintypes\nuser32 = ctypes.windll.user32\nkernel32 = ctypes.windll.kernel32\nmsvcrt = ctypes.cdll.msvcrt\nc_char_p = ctypes.c_char_p\nc_wchar_p = ctypes.c_wchar_p\n\n\nclass ClipboardWindows(ClipboardBase):\n\n def get(self, mimetype='text/plain'):\n GetClipboardData = user32.GetClipboardData\n GetClipboardData.argtypes = [wintypes.UINT]\n GetClipboardData.restype = wintypes.HANDLE\n\n user32.OpenClipboard(user32.GetActiveWindow())\n # 1 is CF_TEXT\n pcontents = GetClipboardData(13)\n if not pcontents:\n return ''\n data = c_wchar_p(pcontents).value.encode(self._encoding)\n user32.CloseClipboard()\n return data\n\n def put(self, text, mimetype='text/plain'):\n text = text.decode(self._encoding) # auto converted later\n text += u'\\x00'\n\n SetClipboardData = user32.SetClipboardData\n SetClipboardData.argtypes = [wintypes.UINT, wintypes.HANDLE]\n SetClipboardData.restype = wintypes.HANDLE\n\n GlobalAlloc = kernel32.GlobalAlloc\n GlobalAlloc.argtypes = [wintypes.UINT, ctypes.c_size_t]\n GlobalAlloc.restype = wintypes.HGLOBAL\n\n CF_UNICODETEXT = 13\n\n user32.OpenClipboard(user32.GetActiveWindow())\n user32.EmptyClipboard()\n hCd = GlobalAlloc(0, len(text) * ctypes.sizeof(ctypes.c_wchar))\n msvcrt.wcscpy_s(c_wchar_p(hCd), len(text), c_wchar_p(text))\n SetClipboardData(CF_UNICODETEXT, hCd)\n user32.CloseClipboard()\n\n def get_types(self):\n return ['text/plain']\n", "path": "kivy/core/clipboard/clipboard_winctypes.py"}]} | 1,308 | 204 |
gh_patches_debug_5831 | rasdani/github-patches | git_diff | sherlock-project__sherlock-139 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sites sorting
It may be a good idea to sort the sites in sites.md and data.json alphabetically. When I'm looking for sites to add, I always have to Ctrl+F in this repo or just scroll through the file... Also when seeing the results, it's just chaos.
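One simple way to keep `data.json` permanently ordered is to rewrite it with sorted keys whenever the site listing is regenerated — a small sketch using the same file names as this repo; the generated sites.md then picks up the same order:
```python
import json

with open("data.json", "r", encoding="utf-8") as data_file:
    data = json.load(data_file)

# Re-serialise with keys sorted so every regeneration leaves the site
# entries in alphabetical order.
with open("data.json", "w") as data_file:
    data_file.write(json.dumps(data, indent=2, sort_keys=True))
```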
</issue>
<code>
[start of site_list.py]
1 """Sherlock: Supported Site Listing
2
3 This module generates the listing of supported sites.
4 """
5 import json
6
7 with open("data.json", "r", encoding="utf-8") as data_file:
8 data = json.load(data_file)
9
10 with open("sites.md", "w") as site_file:
11 site_file.write(f'## List Of Supported Sites ({len(data)} Sites In Total!)\n')
12
13 index = 1
14 for social_network in data:
15 url_main = data.get(social_network).get("urlMain")
16 site_file.write(f'{index}. [{social_network}]({url_main})\n')
17 index = index + 1
18
19 print("Finished updating supported site listing!")
20
[end of site_list.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/site_list.py b/site_list.py
--- a/site_list.py
+++ b/site_list.py
@@ -7,6 +7,11 @@
with open("data.json", "r", encoding="utf-8") as data_file:
data = json.load(data_file)
+sorted_json_data = json.dumps(data, indent=2, sort_keys=True)
+
+with open("data.json", "w") as data_file:
+ data_file.write(sorted_json_data)
+
with open("sites.md", "w") as site_file:
site_file.write(f'## List Of Supported Sites ({len(data)} Sites In Total!)\n')
| {"golden_diff": "diff --git a/site_list.py b/site_list.py\n--- a/site_list.py\n+++ b/site_list.py\n@@ -7,6 +7,11 @@\n with open(\"data.json\", \"r\", encoding=\"utf-8\") as data_file:\n data = json.load(data_file)\n \n+sorted_json_data = json.dumps(data, indent=2, sort_keys=True)\n+\n+with open(\"data.json\", \"w\") as data_file:\n+ data_file.write(sorted_json_data)\n+\n with open(\"sites.md\", \"w\") as site_file:\n site_file.write(f'## List Of Supported Sites ({len(data)} Sites In Total!)\\n')\n", "issue": "Sites sorting\nIt may be a good idea to sort the sites in sites.md and data.json alphabetically. When I'm looking for sites to add, I always have to Ctrl+F in this repo or just scroll through the file... Also when seeing the results, it's just chaos.\n", "before_files": [{"content": "\"\"\"Sherlock: Supported Site Listing\n\nThis module generates the listing of supported sites.\n\"\"\"\nimport json\n\nwith open(\"data.json\", \"r\", encoding=\"utf-8\") as data_file:\n data = json.load(data_file)\n\nwith open(\"sites.md\", \"w\") as site_file:\n site_file.write(f'## List Of Supported Sites ({len(data)} Sites In Total!)\\n')\n\n index = 1\n for social_network in data:\n url_main = data.get(social_network).get(\"urlMain\")\n site_file.write(f'{index}. [{social_network}]({url_main})\\n')\n index = index + 1\n\nprint(\"Finished updating supported site listing!\")\n", "path": "site_list.py"}]} | 771 | 140 |
gh_patches_debug_7015 | rasdani/github-patches | git_diff | ibis-project__ibis-798 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ImpalaTable.describe_formatted may throw an exception
Excerpt below; will investigate soon.
``` python
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-115-87d40e40503e> in <module>()
----> 1 repr(db.csv_as_parquet.describe_formatted())
/home/wesm/code/cloudera/ibis/ibis/impala/metadata.py in __repr__(self)
295 data['partition schema'] = self.partitions
296
--> 297 pprint.pprint(data, stream=buf)
298
299 return buf.getvalue()
/home/wesm/anaconda3/lib/python3.5/pprint.py in pprint(object, stream, indent, width, depth, compact)
51 stream=stream, indent=indent, width=width, depth=depth,
52 compact=compact)
---> 53 printer.pprint(object)
54
55 def pformat(object, indent=1, width=80, depth=None, *, compact=False):
```
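The truncated traceback stops inside `pprint`, so the offending value isn't visible, but one plausible culprit is that the parsed metadata dict holds `pandas.Timestamp` objects (see `_try_timestamp` below). A hedged sketch of normalising those values to plain `datetime` before they are pretty-printed:
```python
import pandas as pd

def _try_timestamp(x):
    try:
        ts = pd.Timestamp(x)
        # Return a plain datetime so downstream formatting (pprint, sorting,
        # repr) does not need to understand pandas' Timestamp type.
        return ts.to_pydatetime()
    except (ValueError, TypeError):
        return x

print(_try_timestamp("2014-01-01 12:00:00"))  # datetime.datetime(2014, 1, 1, 12, 0)
print(_try_timestamp("MANAGED_TABLE"))        # non-timestamp strings pass through unchanged
```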
</issue>
<code>
[start of ibis/impala/metadata.py]
1 # Copyright 2014 Cloudera Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from six import StringIO
16 import pandas as pd
17
18
19 def parse_metadata(descr_table):
20 parser = MetadataParser(descr_table)
21 return parser.parse()
22
23
24 def _noop(tup):
25 return None
26
27
28 def _item_converter(i):
29 def _get_item(converter=None):
30 def _converter(tup):
31 result = tup[i]
32 if converter is not None:
33 result = converter(result)
34 return result
35
36 return _converter
37
38 return _get_item
39
40 _get_type = _item_converter(1)
41 _get_comment = _item_converter(2)
42
43
44 def _try_timestamp(x):
45 try:
46 return pd.Timestamp(x)
47 except (ValueError, TypeError):
48 return x
49
50
51 def _try_unix_timestamp(x):
52 try:
53 return pd.Timestamp.fromtimestamp(int(x))
54 except (ValueError, TypeError):
55 return x
56
57
58 def _try_boolean(x):
59 try:
60 x = x.lower()
61 if x in ('true', 'yes'):
62 return True
63 elif x in ('false', 'no'):
64 return False
65 return x
66 except (ValueError, TypeError):
67 return x
68
69
70 def _try_int(x):
71 try:
72 return int(x)
73 except (ValueError, TypeError):
74 return x
75
76
77 class MetadataParser(object):
78
79 """
80 A simple state-ish machine to parse the results of DESCRIBE FORMATTED
81 """
82
83 def __init__(self, table):
84 self.table = table
85 self.tuples = list(self.table.itertuples(index=False))
86
87 def _reset(self):
88 self.pos = 0
89 self.schema = None
90 self.partitions = None
91 self.info = None
92 self.storage = None
93
94 def _next_tuple(self):
95 if self.pos == len(self.tuples):
96 raise StopIteration
97
98 result = self.tuples[self.pos]
99 self.pos += 1
100 return result
101
102 def parse(self):
103 self._reset()
104 self._parse()
105
106 return TableMetadata(self.schema, self.info, self.storage,
107 partitions=self.partitions)
108
109 def _parse(self):
110 self.schema = self._parse_schema()
111
112 next_section = self._next_tuple()
113 if 'partition' in next_section[0].lower():
114 self._parse_partitions()
115 else:
116 self._parse_info()
117
118 def _parse_partitions(self):
119 self.partitions = self._parse_schema()
120
121 next_section = self._next_tuple()
122 if 'table information' not in next_section[0].lower():
123 raise ValueError('Table information not present')
124
125 self._parse_info()
126
127 def _parse_schema(self):
128 tup = self._next_tuple()
129 if 'col_name' not in tup[0]:
130 raise ValueError('DESCRIBE FORMATTED did not return '
131 'the expected results: {0}'
132 .format(tup))
133 self._next_tuple()
134
135 # Use for both main schema and partition schema (if any)
136 schema = []
137 while True:
138 tup = self._next_tuple()
139 if tup[0].strip() == '':
140 break
141 schema.append((tup[0], tup[1]))
142
143 return schema
144
145 def _parse_info(self):
146 self.info = {}
147 while True:
148 tup = self._next_tuple()
149 orig_key = tup[0].strip(':')
150 key = _clean_param_name(tup[0])
151
152 if key == '' or key.startswith('#'):
153 # section is done
154 break
155
156 if key == 'table parameters':
157 self._parse_table_parameters()
158 elif key in self._info_cleaners:
159 result = self._info_cleaners[key](tup)
160 self.info[orig_key] = result
161 else:
162 self.info[orig_key] = tup[1]
163
164 if 'storage information' not in key:
165 raise ValueError('Storage information not present')
166
167 self._parse_storage_info()
168
169 _info_cleaners = {
170 'database': _get_type(),
171 'owner': _get_type(),
172 'createtime': _get_type(_try_timestamp),
173 'lastaccesstime': _get_type(_try_timestamp),
174 'protect mode': _get_type(),
175 'retention': _get_type(_try_int),
176 'location': _get_type(),
177 'table type': _get_type()
178 }
179
180 def _parse_table_parameters(self):
181 params = self._parse_nested_params(self._table_param_cleaners)
182 self.info['Table Parameters'] = params
183
184 _table_param_cleaners = {
185 'external': _try_boolean,
186 'column_stats_accurate': _try_boolean,
187 'numfiles': _try_int,
188 'totalsize': _try_int,
189 'stats_generated_via_stats_task': _try_boolean,
190 'numrows': _try_int,
191 'transient_lastddltime': _try_unix_timestamp,
192 }
193
194 def _parse_storage_info(self):
195 self.storage = {}
196 while True:
197 # end of the road
198 try:
199 tup = self._next_tuple()
200 except StopIteration:
201 break
202
203 orig_key = tup[0].strip(':')
204 key = _clean_param_name(tup[0])
205
206 if key == '' or key.startswith('#'):
207 # section is done
208 break
209
210 if key == 'storage desc params':
211 self._parse_storage_desc_params()
212 elif key in self._storage_cleaners:
213 result = self._storage_cleaners[key](tup)
214 self.storage[orig_key] = result
215 else:
216 self.storage[orig_key] = tup[1]
217
218 _storage_cleaners = {
219 'compressed': _get_type(_try_boolean),
220 'num buckets': _get_type(_try_int),
221 }
222
223 def _parse_storage_desc_params(self):
224 params = self._parse_nested_params(self._storage_param_cleaners)
225 self.storage['Desc Params'] = params
226
227 _storage_param_cleaners = {}
228
229 def _parse_nested_params(self, cleaners):
230 params = {}
231 while True:
232 try:
233 tup = self._next_tuple()
234 except StopIteration:
235 break
236 if pd.isnull(tup[1]):
237 break
238
239 key, value = tup[1:]
240
241 if key.lower() in cleaners:
242 cleaner = cleaners[key.lower()]
243 value = cleaner(value)
244 params[key] = value
245
246 return params
247
248
249 def _clean_param_name(x):
250 return x.strip().strip(':').lower()
251
252
253 def _get_meta(attr, key):
254 @property
255 def f(self):
256 data = getattr(self, attr)
257 if isinstance(key, list):
258 result = data
259 for k in key:
260 if k not in result:
261 raise KeyError(k)
262 result = result[k]
263 return result
264 else:
265 return data[key]
266 return f
267
268
269 class TableMetadata(object):
270
271 """
272 Container for the parsed and wrangled results of DESCRIBE FORMATTED for
273 easier Ibis use (and testing).
274 """
275 def __init__(self, schema, info, storage, partitions=None):
276 self.schema = schema
277 self.info = info
278 self.storage = storage
279 self.partitions = partitions
280
281 def __repr__(self):
282 import pprint
283
284 # Quick and dirty for now
285 buf = StringIO()
286 buf.write(str(type(self)))
287 buf.write('\n')
288
289 data = {
290 'schema': self.schema,
291 'info': self.info,
292 'storage info': self.storage
293 }
294 if self.partitions is not None:
295 data['partition schema'] = self.partitions
296
297 pprint.pprint(data, stream=buf)
298
299 return buf.getvalue()
300
301 @property
302 def is_partitioned(self):
303 return self.partitions is not None
304
305 create_time = _get_meta('info', 'CreateTime')
306 location = _get_meta('info', 'Location')
307 owner = _get_meta('info', 'Owner')
308 num_rows = _get_meta('info', ['Table Parameters', 'numRows'])
309 hive_format = _get_meta('storage', 'InputFormat')
310
311 tbl_properties = _get_meta('info', 'Table Parameters')
312 serde_properties = _get_meta('storage', 'Desc Params')
313
314
315 class TableInfo(object):
316 pass
317
318
319 class TableStorageInfo(object):
320 pass
321
[end of ibis/impala/metadata.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ibis/impala/metadata.py b/ibis/impala/metadata.py
--- a/ibis/impala/metadata.py
+++ b/ibis/impala/metadata.py
@@ -43,14 +43,16 @@
def _try_timestamp(x):
try:
- return pd.Timestamp(x)
+ ts = pd.Timestamp(x)
+ return ts.to_pydatetime()
except (ValueError, TypeError):
return x
def _try_unix_timestamp(x):
try:
- return pd.Timestamp.fromtimestamp(int(x))
+ ts = pd.Timestamp.fromtimestamp(int(x))
+ return ts.to_pydatetime()
except (ValueError, TypeError):
return x
| {"golden_diff": "diff --git a/ibis/impala/metadata.py b/ibis/impala/metadata.py\n--- a/ibis/impala/metadata.py\n+++ b/ibis/impala/metadata.py\n@@ -43,14 +43,16 @@\n \n def _try_timestamp(x):\n try:\n- return pd.Timestamp(x)\n+ ts = pd.Timestamp(x)\n+ return ts.to_pydatetime()\n except (ValueError, TypeError):\n return x\n \n \n def _try_unix_timestamp(x):\n try:\n- return pd.Timestamp.fromtimestamp(int(x))\n+ ts = pd.Timestamp.fromtimestamp(int(x))\n+ return ts.to_pydatetime()\n except (ValueError, TypeError):\n return x\n", "issue": "ImpalaTable.describe_formatted may throw an exception\nexcerpt. Will investigate soon\n\n``` python\n---------------------------------------------------------------------------\nTypeError Traceback (most recent call last)\n<ipython-input-115-87d40e40503e> in <module>()\n----> 1 repr(db.csv_as_parquet.describe_formatted())\n\n/home/wesm/code/cloudera/ibis/ibis/impala/metadata.py in __repr__(self)\n 295 data['partition schema'] = self.partitions\n 296 \n--> 297 pprint.pprint(data, stream=buf)\n 298 \n 299 return buf.getvalue()\n\n/home/wesm/anaconda3/lib/python3.5/pprint.py in pprint(object, stream, indent, width, depth, compact)\n 51 stream=stream, indent=indent, width=width, depth=depth,\n 52 compact=compact)\n---> 53 printer.pprint(object)\n 54 \n 55 def pformat(object, indent=1, width=80, depth=None, *, compact=False):\n```\n\n", "before_files": [{"content": "# Copyright 2014 Cloudera Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom six import StringIO\nimport pandas as pd\n\n\ndef parse_metadata(descr_table):\n parser = MetadataParser(descr_table)\n return parser.parse()\n\n\ndef _noop(tup):\n return None\n\n\ndef _item_converter(i):\n def _get_item(converter=None):\n def _converter(tup):\n result = tup[i]\n if converter is not None:\n result = converter(result)\n return result\n\n return _converter\n\n return _get_item\n\n_get_type = _item_converter(1)\n_get_comment = _item_converter(2)\n\n\ndef _try_timestamp(x):\n try:\n return pd.Timestamp(x)\n except (ValueError, TypeError):\n return x\n\n\ndef _try_unix_timestamp(x):\n try:\n return pd.Timestamp.fromtimestamp(int(x))\n except (ValueError, TypeError):\n return x\n\n\ndef _try_boolean(x):\n try:\n x = x.lower()\n if x in ('true', 'yes'):\n return True\n elif x in ('false', 'no'):\n return False\n return x\n except (ValueError, TypeError):\n return x\n\n\ndef _try_int(x):\n try:\n return int(x)\n except (ValueError, TypeError):\n return x\n\n\nclass MetadataParser(object):\n\n \"\"\"\n A simple state-ish machine to parse the results of DESCRIBE FORMATTED\n \"\"\"\n\n def __init__(self, table):\n self.table = table\n self.tuples = list(self.table.itertuples(index=False))\n\n def _reset(self):\n self.pos = 0\n self.schema = None\n self.partitions = None\n self.info = None\n self.storage = None\n\n def _next_tuple(self):\n if self.pos == len(self.tuples):\n raise StopIteration\n\n result = self.tuples[self.pos]\n self.pos += 1\n return result\n\n def parse(self):\n self._reset()\n 
self._parse()\n\n return TableMetadata(self.schema, self.info, self.storage,\n partitions=self.partitions)\n\n def _parse(self):\n self.schema = self._parse_schema()\n\n next_section = self._next_tuple()\n if 'partition' in next_section[0].lower():\n self._parse_partitions()\n else:\n self._parse_info()\n\n def _parse_partitions(self):\n self.partitions = self._parse_schema()\n\n next_section = self._next_tuple()\n if 'table information' not in next_section[0].lower():\n raise ValueError('Table information not present')\n\n self._parse_info()\n\n def _parse_schema(self):\n tup = self._next_tuple()\n if 'col_name' not in tup[0]:\n raise ValueError('DESCRIBE FORMATTED did not return '\n 'the expected results: {0}'\n .format(tup))\n self._next_tuple()\n\n # Use for both main schema and partition schema (if any)\n schema = []\n while True:\n tup = self._next_tuple()\n if tup[0].strip() == '':\n break\n schema.append((tup[0], tup[1]))\n\n return schema\n\n def _parse_info(self):\n self.info = {}\n while True:\n tup = self._next_tuple()\n orig_key = tup[0].strip(':')\n key = _clean_param_name(tup[0])\n\n if key == '' or key.startswith('#'):\n # section is done\n break\n\n if key == 'table parameters':\n self._parse_table_parameters()\n elif key in self._info_cleaners:\n result = self._info_cleaners[key](tup)\n self.info[orig_key] = result\n else:\n self.info[orig_key] = tup[1]\n\n if 'storage information' not in key:\n raise ValueError('Storage information not present')\n\n self._parse_storage_info()\n\n _info_cleaners = {\n 'database': _get_type(),\n 'owner': _get_type(),\n 'createtime': _get_type(_try_timestamp),\n 'lastaccesstime': _get_type(_try_timestamp),\n 'protect mode': _get_type(),\n 'retention': _get_type(_try_int),\n 'location': _get_type(),\n 'table type': _get_type()\n }\n\n def _parse_table_parameters(self):\n params = self._parse_nested_params(self._table_param_cleaners)\n self.info['Table Parameters'] = params\n\n _table_param_cleaners = {\n 'external': _try_boolean,\n 'column_stats_accurate': _try_boolean,\n 'numfiles': _try_int,\n 'totalsize': _try_int,\n 'stats_generated_via_stats_task': _try_boolean,\n 'numrows': _try_int,\n 'transient_lastddltime': _try_unix_timestamp,\n }\n\n def _parse_storage_info(self):\n self.storage = {}\n while True:\n # end of the road\n try:\n tup = self._next_tuple()\n except StopIteration:\n break\n\n orig_key = tup[0].strip(':')\n key = _clean_param_name(tup[0])\n\n if key == '' or key.startswith('#'):\n # section is done\n break\n\n if key == 'storage desc params':\n self._parse_storage_desc_params()\n elif key in self._storage_cleaners:\n result = self._storage_cleaners[key](tup)\n self.storage[orig_key] = result\n else:\n self.storage[orig_key] = tup[1]\n\n _storage_cleaners = {\n 'compressed': _get_type(_try_boolean),\n 'num buckets': _get_type(_try_int),\n }\n\n def _parse_storage_desc_params(self):\n params = self._parse_nested_params(self._storage_param_cleaners)\n self.storage['Desc Params'] = params\n\n _storage_param_cleaners = {}\n\n def _parse_nested_params(self, cleaners):\n params = {}\n while True:\n try:\n tup = self._next_tuple()\n except StopIteration:\n break\n if pd.isnull(tup[1]):\n break\n\n key, value = tup[1:]\n\n if key.lower() in cleaners:\n cleaner = cleaners[key.lower()]\n value = cleaner(value)\n params[key] = value\n\n return params\n\n\ndef _clean_param_name(x):\n return x.strip().strip(':').lower()\n\n\ndef _get_meta(attr, key):\n @property\n def f(self):\n data = getattr(self, attr)\n if isinstance(key, list):\n 
result = data\n for k in key:\n if k not in result:\n raise KeyError(k)\n result = result[k]\n return result\n else:\n return data[key]\n return f\n\n\nclass TableMetadata(object):\n\n \"\"\"\n Container for the parsed and wrangled results of DESCRIBE FORMATTED for\n easier Ibis use (and testing).\n \"\"\"\n def __init__(self, schema, info, storage, partitions=None):\n self.schema = schema\n self.info = info\n self.storage = storage\n self.partitions = partitions\n\n def __repr__(self):\n import pprint\n\n # Quick and dirty for now\n buf = StringIO()\n buf.write(str(type(self)))\n buf.write('\\n')\n\n data = {\n 'schema': self.schema,\n 'info': self.info,\n 'storage info': self.storage\n }\n if self.partitions is not None:\n data['partition schema'] = self.partitions\n\n pprint.pprint(data, stream=buf)\n\n return buf.getvalue()\n\n @property\n def is_partitioned(self):\n return self.partitions is not None\n\n create_time = _get_meta('info', 'CreateTime')\n location = _get_meta('info', 'Location')\n owner = _get_meta('info', 'Owner')\n num_rows = _get_meta('info', ['Table Parameters', 'numRows'])\n hive_format = _get_meta('storage', 'InputFormat')\n\n tbl_properties = _get_meta('info', 'Table Parameters')\n serde_properties = _get_meta('storage', 'Desc Params')\n\n\nclass TableInfo(object):\n pass\n\n\nclass TableStorageInfo(object):\n pass\n", "path": "ibis/impala/metadata.py"}]} | 3,591 | 159 |
gh_patches_debug_9255 | rasdani/github-patches | git_diff | encode__httpx-2999 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ASGI `raw_path` scope key should not include the query string portion.
I ran into a bug where I had written code that assumed `raw_path`, as provided by Uvicorn, would include the query string, but it turns out it does not.
My tests didn't catch this because they were exercising the code using HTTPX ASGI emulation, and it turns out HTTPX thinks that `raw_path` DOES include the query string.
In Uvicorn: https://github.com/encode/uvicorn/blob/93bb8d3879808ae376b57e3721cc227fce2c27c1/uvicorn/protocols/http/h11_impl.py#L207
```python
raw_path, _, query_string = event.target.partition(b"?")
```
But in HTTPX: https://github.com/encode/httpx/blob/9415af643f23600403740baad0a466edc5cdbec1/httpx/_urls.py#L277-L292
I'm pretty confident HTTPX is incorrect about this. The ASGI spec (coincidentally the one bit of it I contributed directly to) says: https://asgi.readthedocs.io/en/latest/specs/www.html#http-connection-scope
> `raw_path` (*byte string*) -- The original HTTP path component unmodified from the bytes that were received by the web server.
On reading it now I realize this is a little ambiguous.
Daphne (the closest we have to a reference implementation of ASGI) backs up the idea that `raw_path` and `path` should be almost identical except for their encoding: https://github.com/django/daphne/blob/e49c39a4e5fac8ec170dd653641a9e90844fd3f1/daphne/ws_protocol.py#L77C1-L78
```python
"path": unquote(self.path.decode("ascii")),
"raw_path": self.path,
```
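
For illustration, here is a minimal sketch (with a made-up request target) of the split Uvicorn performs and of how `path` relates to `raw_path` under that reading of the spec:

```python
from urllib.parse import unquote

# Hypothetical request line: GET /some%20path?q=1 HTTP/1.1
target = b"/some%20path?q=1"

raw_path, _, query_string = target.partition(b"?")
assert raw_path == b"/some%20path"   # still percent-encoded, no query component
assert query_string == b"q=1"

path = unquote(raw_path.decode("ascii"))
assert path == "/some path"          # decoded form, mirroring Daphne's scope
```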
</issue>
<code>
[start of httpx/_transports/asgi.py]
1 import typing
2
3 import sniffio
4
5 from .._models import Request, Response
6 from .._types import AsyncByteStream
7 from .base import AsyncBaseTransport
8
9 if typing.TYPE_CHECKING: # pragma: no cover
10 import asyncio
11
12 import trio
13
14 Event = typing.Union[asyncio.Event, trio.Event]
15
16
17 _Message = typing.Dict[str, typing.Any]
18 _Receive = typing.Callable[[], typing.Awaitable[_Message]]
19 _Send = typing.Callable[
20 [typing.Dict[str, typing.Any]], typing.Coroutine[None, None, None]
21 ]
22 _ASGIApp = typing.Callable[
23 [typing.Dict[str, typing.Any], _Receive, _Send], typing.Coroutine[None, None, None]
24 ]
25
26
27 def create_event() -> "Event":
28 if sniffio.current_async_library() == "trio":
29 import trio
30
31 return trio.Event()
32 else:
33 import asyncio
34
35 return asyncio.Event()
36
37
38 class ASGIResponseStream(AsyncByteStream):
39 def __init__(self, body: typing.List[bytes]) -> None:
40 self._body = body
41
42 async def __aiter__(self) -> typing.AsyncIterator[bytes]:
43 yield b"".join(self._body)
44
45
46 class ASGITransport(AsyncBaseTransport):
47 """
48 A custom AsyncTransport that handles sending requests directly to an ASGI app.
49 The simplest way to use this functionality is to use the `app` argument.
50
51 ```
52 client = httpx.AsyncClient(app=app)
53 ```
54
55 Alternatively, you can setup the transport instance explicitly.
56 This allows you to include any additional configuration arguments specific
57 to the ASGITransport class:
58
59 ```
60 transport = httpx.ASGITransport(
61 app=app,
62 root_path="/submount",
63 client=("1.2.3.4", 123)
64 )
65 client = httpx.AsyncClient(transport=transport)
66 ```
67
68 Arguments:
69
70 * `app` - The ASGI application.
71 * `raise_app_exceptions` - Boolean indicating if exceptions in the application
72 should be raised. Default to `True`. Can be set to `False` for use cases
73 such as testing the content of a client 500 response.
74 * `root_path` - The root path on which the ASGI application should be mounted.
75 * `client` - A two-tuple indicating the client IP and port of incoming requests.
76 ```
77 """
78
79 def __init__(
80 self,
81 app: _ASGIApp,
82 raise_app_exceptions: bool = True,
83 root_path: str = "",
84 client: typing.Tuple[str, int] = ("127.0.0.1", 123),
85 ) -> None:
86 self.app = app
87 self.raise_app_exceptions = raise_app_exceptions
88 self.root_path = root_path
89 self.client = client
90
91 async def handle_async_request(
92 self,
93 request: Request,
94 ) -> Response:
95 assert isinstance(request.stream, AsyncByteStream)
96
97 # ASGI scope.
98 scope = {
99 "type": "http",
100 "asgi": {"version": "3.0"},
101 "http_version": "1.1",
102 "method": request.method,
103 "headers": [(k.lower(), v) for (k, v) in request.headers.raw],
104 "scheme": request.url.scheme,
105 "path": request.url.path,
106 "raw_path": request.url.raw_path,
107 "query_string": request.url.query,
108 "server": (request.url.host, request.url.port),
109 "client": self.client,
110 "root_path": self.root_path,
111 }
112
113 # Request.
114 request_body_chunks = request.stream.__aiter__()
115 request_complete = False
116
117 # Response.
118 status_code = None
119 response_headers = None
120 body_parts = []
121 response_started = False
122 response_complete = create_event()
123
124 # ASGI callables.
125
126 async def receive() -> typing.Dict[str, typing.Any]:
127 nonlocal request_complete
128
129 if request_complete:
130 await response_complete.wait()
131 return {"type": "http.disconnect"}
132
133 try:
134 body = await request_body_chunks.__anext__()
135 except StopAsyncIteration:
136 request_complete = True
137 return {"type": "http.request", "body": b"", "more_body": False}
138 return {"type": "http.request", "body": body, "more_body": True}
139
140 async def send(message: typing.Dict[str, typing.Any]) -> None:
141 nonlocal status_code, response_headers, response_started
142
143 if message["type"] == "http.response.start":
144 assert not response_started
145
146 status_code = message["status"]
147 response_headers = message.get("headers", [])
148 response_started = True
149
150 elif message["type"] == "http.response.body":
151 assert not response_complete.is_set()
152 body = message.get("body", b"")
153 more_body = message.get("more_body", False)
154
155 if body and request.method != "HEAD":
156 body_parts.append(body)
157
158 if not more_body:
159 response_complete.set()
160
161 try:
162 await self.app(scope, receive, send)
163 except Exception: # noqa: PIE-786
164 if self.raise_app_exceptions:
165 raise
166
167 response_complete.set()
168 if status_code is None:
169 status_code = 500
170 if response_headers is None:
171 response_headers = {}
172
173 assert response_complete.is_set()
174 assert status_code is not None
175 assert response_headers is not None
176
177 stream = ASGIResponseStream(body_parts)
178
179 return Response(status_code, headers=response_headers, stream=stream)
180
[end of httpx/_transports/asgi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/httpx/_transports/asgi.py b/httpx/_transports/asgi.py
--- a/httpx/_transports/asgi.py
+++ b/httpx/_transports/asgi.py
@@ -103,7 +103,7 @@
"headers": [(k.lower(), v) for (k, v) in request.headers.raw],
"scheme": request.url.scheme,
"path": request.url.path,
- "raw_path": request.url.raw_path,
+ "raw_path": request.url.raw_path.split(b"?")[0],
"query_string": request.url.query,
"server": (request.url.host, request.url.port),
"client": self.client,
| {"golden_diff": "diff --git a/httpx/_transports/asgi.py b/httpx/_transports/asgi.py\n--- a/httpx/_transports/asgi.py\n+++ b/httpx/_transports/asgi.py\n@@ -103,7 +103,7 @@\n \"headers\": [(k.lower(), v) for (k, v) in request.headers.raw],\n \"scheme\": request.url.scheme,\n \"path\": request.url.path,\n- \"raw_path\": request.url.raw_path,\n+ \"raw_path\": request.url.raw_path.split(b\"?\")[0],\n \"query_string\": request.url.query,\n \"server\": (request.url.host, request.url.port),\n \"client\": self.client,\n", "issue": "ASGI `raw_path` scope key should not include the query string portion.\nI ran into a bug where I had written code that assumed `raw_path` as provided by Uvicorn would include the query string, where it turns out not to.\r\n\r\nMy tests didn't catch this because they were exercising the code using HTTPX ASGI emulation, and it turns out HTTPX thinks that `raw_path` DOES include the query string.\r\n\r\nIn Uvicorn: https://github.com/encode/uvicorn/blob/93bb8d3879808ae376b57e3721cc227fce2c27c1/uvicorn/protocols/http/h11_impl.py#L207\r\n```python\r\n raw_path, _, query_string = event.target.partition(b\"?\")\r\n```\r\nBut in HTTPX: https://github.com/encode/httpx/blob/9415af643f23600403740baad0a466edc5cdbec1/httpx/_urls.py#L277-L292\r\n\r\nI'm pretty confident HTTPX is incorrect about this. The ASGI spec (coincidentally the one bit of it I contributed directly to) says: https://asgi.readthedocs.io/en/latest/specs/www.html#http-connection-scope\r\n\r\n> `raw_path` (*byte string*) -- The original HTTP path component unmodified from the bytes that were received by the web server.\r\n\r\nOn reading it now I realize this is a little ambiguous.\r\n\r\nDaphne (the closest we have to a reference implementation of ASGI) backs up the idea that `raw_path` and `path` should be almost identical except for their encoding: https://github.com/django/daphne/blob/e49c39a4e5fac8ec170dd653641a9e90844fd3f1/daphne/ws_protocol.py#L77C1-L78\r\n\r\n```python\r\n \"path\": unquote(self.path.decode(\"ascii\")),\r\n \"raw_path\": self.path,\r\n```\n", "before_files": [{"content": "import typing\n\nimport sniffio\n\nfrom .._models import Request, Response\nfrom .._types import AsyncByteStream\nfrom .base import AsyncBaseTransport\n\nif typing.TYPE_CHECKING: # pragma: no cover\n import asyncio\n\n import trio\n\n Event = typing.Union[asyncio.Event, trio.Event]\n\n\n_Message = typing.Dict[str, typing.Any]\n_Receive = typing.Callable[[], typing.Awaitable[_Message]]\n_Send = typing.Callable[\n [typing.Dict[str, typing.Any]], typing.Coroutine[None, None, None]\n]\n_ASGIApp = typing.Callable[\n [typing.Dict[str, typing.Any], _Receive, _Send], typing.Coroutine[None, None, None]\n]\n\n\ndef create_event() -> \"Event\":\n if sniffio.current_async_library() == \"trio\":\n import trio\n\n return trio.Event()\n else:\n import asyncio\n\n return asyncio.Event()\n\n\nclass ASGIResponseStream(AsyncByteStream):\n def __init__(self, body: typing.List[bytes]) -> None:\n self._body = body\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n yield b\"\".join(self._body)\n\n\nclass ASGITransport(AsyncBaseTransport):\n \"\"\"\n A custom AsyncTransport that handles sending requests directly to an ASGI app.\n The simplest way to use this functionality is to use the `app` argument.\n\n ```\n client = httpx.AsyncClient(app=app)\n ```\n\n Alternatively, you can setup the transport instance explicitly.\n This allows you to include any additional configuration arguments specific\n to the ASGITransport class:\n\n ```\n 
transport = httpx.ASGITransport(\n app=app,\n root_path=\"/submount\",\n client=(\"1.2.3.4\", 123)\n )\n client = httpx.AsyncClient(transport=transport)\n ```\n\n Arguments:\n\n * `app` - The ASGI application.\n * `raise_app_exceptions` - Boolean indicating if exceptions in the application\n should be raised. Default to `True`. Can be set to `False` for use cases\n such as testing the content of a client 500 response.\n * `root_path` - The root path on which the ASGI application should be mounted.\n * `client` - A two-tuple indicating the client IP and port of incoming requests.\n ```\n \"\"\"\n\n def __init__(\n self,\n app: _ASGIApp,\n raise_app_exceptions: bool = True,\n root_path: str = \"\",\n client: typing.Tuple[str, int] = (\"127.0.0.1\", 123),\n ) -> None:\n self.app = app\n self.raise_app_exceptions = raise_app_exceptions\n self.root_path = root_path\n self.client = client\n\n async def handle_async_request(\n self,\n request: Request,\n ) -> Response:\n assert isinstance(request.stream, AsyncByteStream)\n\n # ASGI scope.\n scope = {\n \"type\": \"http\",\n \"asgi\": {\"version\": \"3.0\"},\n \"http_version\": \"1.1\",\n \"method\": request.method,\n \"headers\": [(k.lower(), v) for (k, v) in request.headers.raw],\n \"scheme\": request.url.scheme,\n \"path\": request.url.path,\n \"raw_path\": request.url.raw_path,\n \"query_string\": request.url.query,\n \"server\": (request.url.host, request.url.port),\n \"client\": self.client,\n \"root_path\": self.root_path,\n }\n\n # Request.\n request_body_chunks = request.stream.__aiter__()\n request_complete = False\n\n # Response.\n status_code = None\n response_headers = None\n body_parts = []\n response_started = False\n response_complete = create_event()\n\n # ASGI callables.\n\n async def receive() -> typing.Dict[str, typing.Any]:\n nonlocal request_complete\n\n if request_complete:\n await response_complete.wait()\n return {\"type\": \"http.disconnect\"}\n\n try:\n body = await request_body_chunks.__anext__()\n except StopAsyncIteration:\n request_complete = True\n return {\"type\": \"http.request\", \"body\": b\"\", \"more_body\": False}\n return {\"type\": \"http.request\", \"body\": body, \"more_body\": True}\n\n async def send(message: typing.Dict[str, typing.Any]) -> None:\n nonlocal status_code, response_headers, response_started\n\n if message[\"type\"] == \"http.response.start\":\n assert not response_started\n\n status_code = message[\"status\"]\n response_headers = message.get(\"headers\", [])\n response_started = True\n\n elif message[\"type\"] == \"http.response.body\":\n assert not response_complete.is_set()\n body = message.get(\"body\", b\"\")\n more_body = message.get(\"more_body\", False)\n\n if body and request.method != \"HEAD\":\n body_parts.append(body)\n\n if not more_body:\n response_complete.set()\n\n try:\n await self.app(scope, receive, send)\n except Exception: # noqa: PIE-786\n if self.raise_app_exceptions:\n raise\n\n response_complete.set()\n if status_code is None:\n status_code = 500\n if response_headers is None:\n response_headers = {}\n\n assert response_complete.is_set()\n assert status_code is not None\n assert response_headers is not None\n\n stream = ASGIResponseStream(body_parts)\n\n return Response(status_code, headers=response_headers, stream=stream)\n", "path": "httpx/_transports/asgi.py"}]} | 2,667 | 151 |
gh_patches_debug_5674 | rasdani/github-patches | git_diff | mozilla__bugbug-1214 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Log number of spam/non-spam bugs in SpamBug get_labels
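
A sketch of one way to do this, assuming the `classes` dict that `get_labels` builds (0 = non-spam, 1 = spam); the tiny dict below is only a stand-in for illustration:

```python
# classes maps bug_id -> label; a small stand-in for the real dict
classes = {1: 0, 2: 1, 3: 0}

non_spam = sum(1 for label in classes.values() if label == 0)
spam = sum(1 for label in classes.values() if label == 1)
print(f"{non_spam} bugs are classified as non-spam")
print(f"{spam} bugs are classified as spam")
```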
</issue>
<code>
[start of bugbug/models/spambug.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import xgboost
7 from imblearn.under_sampling import RandomUnderSampler
8 from sklearn.compose import ColumnTransformer
9 from sklearn.feature_extraction import DictVectorizer
10 from sklearn.pipeline import Pipeline
11
12 from bugbug import bug_features, bugzilla, feature_cleanup
13 from bugbug.model import BugModel
14
15
16 class SpamBugModel(BugModel):
17 def __init__(self, lemmatization=False):
18 BugModel.__init__(self, lemmatization)
19
20 self.sampler = RandomUnderSampler(random_state=0)
21
22 feature_extractors = [
23 bug_features.has_str(),
24 bug_features.has_regression_range(),
25 bug_features.severity(),
26 bug_features.is_coverity_issue(),
27 bug_features.has_crash_signature(),
28 bug_features.has_url(),
29 bug_features.has_w3c_url(),
30 bug_features.has_github_url(),
31 bug_features.whiteboard(),
32 bug_features.patches(),
33 bug_features.landings(),
34 bug_features.product(),
35 bug_features.component(),
36 bug_features.num_words_title(),
37 bug_features.num_words_comments(),
38 bug_features.keywords(),
39 ]
40
41 cleanup_functions = [
42 feature_cleanup.fileref(),
43 feature_cleanup.url(),
44 feature_cleanup.synonyms(),
45 ]
46
47 self.extraction_pipeline = Pipeline(
48 [
49 (
50 "bug_extractor",
51 bug_features.BugExtractor(
52 feature_extractors, cleanup_functions, rollback=True
53 ),
54 ),
55 (
56 "union",
57 ColumnTransformer(
58 [
59 ("data", DictVectorizer(), "data"),
60 ("title", self.text_vectorizer(), "title"),
61 ("comments", self.text_vectorizer(), "comments"),
62 ]
63 ),
64 ),
65 ]
66 )
67
68 self.clf = xgboost.XGBClassifier(n_jobs=16)
69 self.clf.set_params(predictor="cpu_predictor")
70
71 def get_labels(self):
72 classes = {}
73
74 for bug_data in bugzilla.get_bugs(include_invalid=True):
75 bug_id = bug_data["id"]
76
77 # Legitimate bugs
78 if bug_data["resolution"] == "FIXED":
79 classes[bug_id] = 0
80
81 # Spam bugs
82 elif (
83 bug_data["product"] == "Invalid Bugs"
84 and bug_data["component"] == "General"
85 ):
86 classes[bug_id] = 1
87
88 return classes, [0, 1]
89
90 def items_gen(self, classes):
91 # Overwriting this method to add include_invalid=True to get_bugs to
92 # include spam bugs.
93 return (
94 (bug, classes[bug["id"]])
95 for bug in bugzilla.get_bugs(include_invalid=True)
96 if bug["id"] in classes
97 )
98
99 def get_feature_names(self):
100 return self.extraction_pipeline.named_steps["union"].get_feature_names()
101
102 def overwrite_classes(self, bugs, classes, probabilities):
103 for (i, bug) in enumerate(bugs):
104 if "@mozilla" in bug["creator"]:
105 if probabilities:
106 classes[i] = [1.0, 0.0]
107 else:
108 classes[i] = 0
109
110 return classes
111
[end of bugbug/models/spambug.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bugbug/models/spambug.py b/bugbug/models/spambug.py
--- a/bugbug/models/spambug.py
+++ b/bugbug/models/spambug.py
@@ -85,6 +85,17 @@
):
classes[bug_id] = 1
+ print(
+ "{} bugs are classified as non-spam".format(
+ sum(1 for label in classes.values() if label == 0)
+ )
+ )
+ print(
+ "{} bugs are classified as spam".format(
+ sum(1 for label in classes.values() if label == 1)
+ )
+ )
+
return classes, [0, 1]
def items_gen(self, classes):
| {"golden_diff": "diff --git a/bugbug/models/spambug.py b/bugbug/models/spambug.py\n--- a/bugbug/models/spambug.py\n+++ b/bugbug/models/spambug.py\n@@ -85,6 +85,17 @@\n ):\n classes[bug_id] = 1\n \n+ print(\n+ \"{} bugs are classified as non-spam\".format(\n+ sum(1 for label in classes.values() if label == 0)\n+ )\n+ )\n+ print(\n+ \"{} bugs are classified as spam\".format(\n+ sum(1 for label in classes.values() if label == 1)\n+ )\n+ )\n+\n return classes, [0, 1]\n \n def items_gen(self, classes):\n", "issue": "Log number of spam/non-spam bugs in SpamBug get_labels\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup\nfrom bugbug.model import BugModel\n\n\nclass SpamBugModel(BugModel):\n def __init__(self, lemmatization=False):\n BugModel.__init__(self, lemmatization)\n\n self.sampler = RandomUnderSampler(random_state=0)\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n bug_features.product(),\n bug_features.component(),\n bug_features.num_words_title(),\n bug_features.num_words_comments(),\n bug_features.keywords(),\n ]\n\n cleanup_functions = [\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(\n feature_extractors, cleanup_functions, rollback=True\n ),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(), \"title\"),\n (\"comments\", self.text_vectorizer(), \"comments\"),\n ]\n ),\n ),\n ]\n )\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor=\"cpu_predictor\")\n\n def get_labels(self):\n classes = {}\n\n for bug_data in bugzilla.get_bugs(include_invalid=True):\n bug_id = bug_data[\"id\"]\n\n # Legitimate bugs\n if bug_data[\"resolution\"] == \"FIXED\":\n classes[bug_id] = 0\n\n # Spam bugs\n elif (\n bug_data[\"product\"] == \"Invalid Bugs\"\n and bug_data[\"component\"] == \"General\"\n ):\n classes[bug_id] = 1\n\n return classes, [0, 1]\n\n def items_gen(self, classes):\n # Overwriting this method to add include_invalid=True to get_bugs to\n # include spam bugs.\n return (\n (bug, classes[bug[\"id\"]])\n for bug in bugzilla.get_bugs(include_invalid=True)\n if bug[\"id\"] in classes\n )\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names()\n\n def overwrite_classes(self, bugs, classes, probabilities):\n for (i, bug) in enumerate(bugs):\n if \"@mozilla\" in bug[\"creator\"]:\n if probabilities:\n classes[i] = [1.0, 0.0]\n else:\n classes[i] = 0\n\n return classes\n", "path": "bugbug/models/spambug.py"}]} | 1,496 | 167 |
gh_patches_debug_32358 | rasdani/github-patches | git_diff | deepset-ai__haystack-5809 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change `MemoryEmbeddingRetriever` to non-batch mode
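
A minimal sketch of the shape change, using a stand-in for the document store's `embedding_retrieval` method (the stub below is hypothetical and only illustrates the batch vs. non-batch result shapes):

```python
from typing import Any, Dict, List

def embedding_retrieval(query_embedding: List[float], **kwargs) -> List[str]:
    """Stand-in for MemoryDocumentStore.embedding_retrieval; returns fake docs."""
    return [f"doc for {query_embedding!r}"]

# Today (batch): a list of embeddings in, a list of lists of Documents out
queries_embeddings = [[0.1, 0.2], [0.3, 0.4]]
batched: Dict[str, Any] = {
    "documents": [embedding_retrieval(query_embedding=e) for e in queries_embeddings]
}

# Proposed (non-batch): a single embedding in, a flat list of Documents out
query_embedding = [0.1, 0.2]
single: Dict[str, Any] = {
    "documents": embedding_retrieval(query_embedding=query_embedding)
}
```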
</issue>
<code>
[start of haystack/preview/components/retrievers/memory.py]
1 from typing import Dict, List, Any, Optional
2
3 from haystack.preview import component, Document, default_to_dict, default_from_dict, DeserializationError
4 from haystack.preview.document_stores import MemoryDocumentStore, document_store
5
6
7 @component
8 class MemoryBM25Retriever:
9 """
10 A component for retrieving documents from a MemoryDocumentStore using the BM25 algorithm.
11
12 Needs to be connected to a MemoryDocumentStore to run.
13 """
14
15 def __init__(
16 self,
17 document_store: MemoryDocumentStore,
18 filters: Optional[Dict[str, Any]] = None,
19 top_k: int = 10,
20 scale_score: bool = True,
21 ):
22 """
23 Create a MemoryBM25Retriever component.
24
25 :param document_store: An instance of MemoryDocumentStore.
26 :param filters: A dictionary with filters to narrow down the search space. Default is None.
27 :param top_k: The maximum number of documents to retrieve. Default is 10.
28 :param scale_score: Whether to scale the BM25 score or not. Default is True.
29
30 :raises ValueError: If the specified top_k is not > 0.
31 """
32 if not isinstance(document_store, MemoryDocumentStore):
33 raise ValueError("document_store must be an instance of MemoryDocumentStore")
34
35 self.document_store = document_store
36
37 if top_k <= 0:
38 raise ValueError(f"top_k must be > 0, but got {top_k}")
39
40 self.filters = filters
41 self.top_k = top_k
42 self.scale_score = scale_score
43
44 def to_dict(self) -> Dict[str, Any]:
45 """
46 Serialize this component to a dictionary.
47 """
48 docstore = self.document_store.to_dict()
49 return default_to_dict(
50 self, document_store=docstore, filters=self.filters, top_k=self.top_k, scale_score=self.scale_score
51 )
52
53 @classmethod
54 def from_dict(cls, data: Dict[str, Any]) -> "MemoryBM25Retriever":
55 """
56 Deserialize this component from a dictionary.
57 """
58 init_params = data.get("init_parameters", {})
59 if "document_store" not in init_params:
60 raise DeserializationError("Missing 'document_store' in serialization data")
61 if "type" not in init_params["document_store"]:
62 raise DeserializationError("Missing 'type' in document store's serialization data")
63 if init_params["document_store"]["type"] not in document_store.registry:
64 raise DeserializationError(f"DocumentStore type '{init_params['document_store']['type']}' not found")
65
66 docstore_class = document_store.registry[init_params["document_store"]["type"]]
67 docstore = docstore_class.from_dict(init_params["document_store"])
68 data["init_parameters"]["document_store"] = docstore
69 return default_from_dict(cls, data)
70
71 @component.output_types(documents=List[Document])
72 def run(
73 self,
74 query: str,
75 filters: Optional[Dict[str, Any]] = None,
76 top_k: Optional[int] = None,
77 scale_score: Optional[bool] = None,
78 ):
79 """
80 Run the MemoryBM25Retriever on the given input data.
81
82 :param query: The query string for the retriever.
83 :param filters: A dictionary with filters to narrow down the search space.
84 :param top_k: The maximum number of documents to return.
85 :param scale_score: Whether to scale the BM25 scores or not.
86 :return: The retrieved documents.
87
88 :raises ValueError: If the specified DocumentStore is not found or is not a MemoryDocumentStore instance.
89 """
90 if filters is None:
91 filters = self.filters
92 if top_k is None:
93 top_k = self.top_k
94 if scale_score is None:
95 scale_score = self.scale_score
96
97 docs = self.document_store.bm25_retrieval(query=query, filters=filters, top_k=top_k, scale_score=scale_score)
98 return {"documents": docs}
99
100
101 @component
102 class MemoryEmbeddingRetriever:
103 """
104 A component for retrieving documents from a MemoryDocumentStore using a vector similarity metric.
105
106 Needs to be connected to a MemoryDocumentStore to run.
107 """
108
109 def __init__(
110 self,
111 document_store: MemoryDocumentStore,
112 filters: Optional[Dict[str, Any]] = None,
113 top_k: int = 10,
114 scale_score: bool = True,
115 return_embedding: bool = False,
116 ):
117 """
118 Create a MemoryEmbeddingRetriever component.
119
120 :param document_store: An instance of MemoryDocumentStore.
121 :param filters: A dictionary with filters to narrow down the search space. Default is None.
122 :param top_k: The maximum number of documents to retrieve. Default is 10.
123 :param scale_score: Whether to scale the scores of the retrieved documents or not. Default is True.
124 :param return_embedding: Whether to return the embedding of the retrieved Documents. Default is False.
125
126 :raises ValueError: If the specified top_k is not > 0.
127 """
128 if not isinstance(document_store, MemoryDocumentStore):
129 raise ValueError("document_store must be an instance of MemoryDocumentStore")
130
131 self.document_store = document_store
132
133 if top_k <= 0:
134 raise ValueError(f"top_k must be > 0, but got {top_k}")
135
136 self.filters = filters
137 self.top_k = top_k
138 self.scale_score = scale_score
139 self.return_embedding = return_embedding
140
141 def to_dict(self) -> Dict[str, Any]:
142 """
143 Serialize this component to a dictionary.
144 """
145 docstore = self.document_store.to_dict()
146 return default_to_dict(
147 self,
148 document_store=docstore,
149 filters=self.filters,
150 top_k=self.top_k,
151 scale_score=self.scale_score,
152 return_embedding=self.return_embedding,
153 )
154
155 @classmethod
156 def from_dict(cls, data: Dict[str, Any]) -> "MemoryBM25Retriever":
157 """
158 Deserialize this component from a dictionary.
159 """
160 init_params = data.get("init_parameters", {})
161 if "document_store" not in init_params:
162 raise DeserializationError("Missing 'document_store' in serialization data")
163 if "type" not in init_params["document_store"]:
164 raise DeserializationError("Missing 'type' in document store's serialization data")
165 if init_params["document_store"]["type"] not in document_store.registry:
166 raise DeserializationError(f"DocumentStore type '{init_params['document_store']['type']}' not found")
167
168 docstore_class = document_store.registry[init_params["document_store"]["type"]]
169 docstore = docstore_class.from_dict(init_params["document_store"])
170 data["init_parameters"]["document_store"] = docstore
171 return default_from_dict(cls, data)
172
173 @component.output_types(documents=List[List[Document]])
174 def run(
175 self,
176 queries_embeddings: List[List[float]],
177 filters: Optional[Dict[str, Any]] = None,
178 top_k: Optional[int] = None,
179 scale_score: Optional[bool] = None,
180 return_embedding: Optional[bool] = None,
181 ):
182 """
183 Run the MemoryEmbeddingRetriever on the given input data.
184
185 :param queries_embeddings: Embeddings of the queries.
186 :param filters: A dictionary with filters to narrow down the search space.
187 :param top_k: The maximum number of documents to return.
188 :param scale_score: Whether to scale the scores of the retrieved documents or not.
189 :param return_embedding: Whether to return the embedding of the retrieved Documents.
190 :return: The retrieved documents.
191
192 :raises ValueError: If the specified DocumentStore is not found or is not a MemoryDocumentStore instance.
193 """
194 if filters is None:
195 filters = self.filters
196 if top_k is None:
197 top_k = self.top_k
198 if scale_score is None:
199 scale_score = self.scale_score
200 if return_embedding is None:
201 return_embedding = self.return_embedding
202
203 docs = []
204 for query_embedding in queries_embeddings:
205 docs.append(
206 self.document_store.embedding_retrieval(
207 query_embedding=query_embedding,
208 filters=filters,
209 top_k=top_k,
210 scale_score=scale_score,
211 return_embedding=return_embedding,
212 )
213 )
214 return {"documents": docs}
215
[end of haystack/preview/components/retrievers/memory.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/haystack/preview/components/retrievers/memory.py b/haystack/preview/components/retrievers/memory.py
--- a/haystack/preview/components/retrievers/memory.py
+++ b/haystack/preview/components/retrievers/memory.py
@@ -173,7 +173,7 @@
@component.output_types(documents=List[List[Document]])
def run(
self,
- queries_embeddings: List[List[float]],
+ query_embedding: List[float],
filters: Optional[Dict[str, Any]] = None,
top_k: Optional[int] = None,
scale_score: Optional[bool] = None,
@@ -182,7 +182,7 @@
"""
Run the MemoryEmbeddingRetriever on the given input data.
- :param queries_embeddings: Embeddings of the queries.
+ :param query_embedding: Embedding of the query.
:param filters: A dictionary with filters to narrow down the search space.
:param top_k: The maximum number of documents to return.
:param scale_score: Whether to scale the scores of the retrieved documents or not.
@@ -200,15 +200,12 @@
if return_embedding is None:
return_embedding = self.return_embedding
- docs = []
- for query_embedding in queries_embeddings:
- docs.append(
- self.document_store.embedding_retrieval(
- query_embedding=query_embedding,
- filters=filters,
- top_k=top_k,
- scale_score=scale_score,
- return_embedding=return_embedding,
- )
- )
+ docs = self.document_store.embedding_retrieval(
+ query_embedding=query_embedding,
+ filters=filters,
+ top_k=top_k,
+ scale_score=scale_score,
+ return_embedding=return_embedding,
+ )
+
return {"documents": docs}
| {"golden_diff": "diff --git a/haystack/preview/components/retrievers/memory.py b/haystack/preview/components/retrievers/memory.py\n--- a/haystack/preview/components/retrievers/memory.py\n+++ b/haystack/preview/components/retrievers/memory.py\n@@ -173,7 +173,7 @@\n @component.output_types(documents=List[List[Document]])\n def run(\n self,\n- queries_embeddings: List[List[float]],\n+ query_embedding: List[float],\n filters: Optional[Dict[str, Any]] = None,\n top_k: Optional[int] = None,\n scale_score: Optional[bool] = None,\n@@ -182,7 +182,7 @@\n \"\"\"\n Run the MemoryEmbeddingRetriever on the given input data.\n \n- :param queries_embeddings: Embeddings of the queries.\n+ :param query_embedding: Embedding of the query.\n :param filters: A dictionary with filters to narrow down the search space.\n :param top_k: The maximum number of documents to return.\n :param scale_score: Whether to scale the scores of the retrieved documents or not.\n@@ -200,15 +200,12 @@\n if return_embedding is None:\n return_embedding = self.return_embedding\n \n- docs = []\n- for query_embedding in queries_embeddings:\n- docs.append(\n- self.document_store.embedding_retrieval(\n- query_embedding=query_embedding,\n- filters=filters,\n- top_k=top_k,\n- scale_score=scale_score,\n- return_embedding=return_embedding,\n- )\n- )\n+ docs = self.document_store.embedding_retrieval(\n+ query_embedding=query_embedding,\n+ filters=filters,\n+ top_k=top_k,\n+ scale_score=scale_score,\n+ return_embedding=return_embedding,\n+ )\n+\n return {\"documents\": docs}\n", "issue": "Change `MemoryEmbeddingRetriever` to non-batch mode\n\n", "before_files": [{"content": "from typing import Dict, List, Any, Optional\n\nfrom haystack.preview import component, Document, default_to_dict, default_from_dict, DeserializationError\nfrom haystack.preview.document_stores import MemoryDocumentStore, document_store\n\n\n@component\nclass MemoryBM25Retriever:\n \"\"\"\n A component for retrieving documents from a MemoryDocumentStore using the BM25 algorithm.\n\n Needs to be connected to a MemoryDocumentStore to run.\n \"\"\"\n\n def __init__(\n self,\n document_store: MemoryDocumentStore,\n filters: Optional[Dict[str, Any]] = None,\n top_k: int = 10,\n scale_score: bool = True,\n ):\n \"\"\"\n Create a MemoryBM25Retriever component.\n\n :param document_store: An instance of MemoryDocumentStore.\n :param filters: A dictionary with filters to narrow down the search space. Default is None.\n :param top_k: The maximum number of documents to retrieve. Default is 10.\n :param scale_score: Whether to scale the BM25 score or not. 
Default is True.\n\n :raises ValueError: If the specified top_k is not > 0.\n \"\"\"\n if not isinstance(document_store, MemoryDocumentStore):\n raise ValueError(\"document_store must be an instance of MemoryDocumentStore\")\n\n self.document_store = document_store\n\n if top_k <= 0:\n raise ValueError(f\"top_k must be > 0, but got {top_k}\")\n\n self.filters = filters\n self.top_k = top_k\n self.scale_score = scale_score\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n \"\"\"\n docstore = self.document_store.to_dict()\n return default_to_dict(\n self, document_store=docstore, filters=self.filters, top_k=self.top_k, scale_score=self.scale_score\n )\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"MemoryBM25Retriever\":\n \"\"\"\n Deserialize this component from a dictionary.\n \"\"\"\n init_params = data.get(\"init_parameters\", {})\n if \"document_store\" not in init_params:\n raise DeserializationError(\"Missing 'document_store' in serialization data\")\n if \"type\" not in init_params[\"document_store\"]:\n raise DeserializationError(\"Missing 'type' in document store's serialization data\")\n if init_params[\"document_store\"][\"type\"] not in document_store.registry:\n raise DeserializationError(f\"DocumentStore type '{init_params['document_store']['type']}' not found\")\n\n docstore_class = document_store.registry[init_params[\"document_store\"][\"type\"]]\n docstore = docstore_class.from_dict(init_params[\"document_store\"])\n data[\"init_parameters\"][\"document_store\"] = docstore\n return default_from_dict(cls, data)\n\n @component.output_types(documents=List[Document])\n def run(\n self,\n query: str,\n filters: Optional[Dict[str, Any]] = None,\n top_k: Optional[int] = None,\n scale_score: Optional[bool] = None,\n ):\n \"\"\"\n Run the MemoryBM25Retriever on the given input data.\n\n :param query: The query string for the retriever.\n :param filters: A dictionary with filters to narrow down the search space.\n :param top_k: The maximum number of documents to return.\n :param scale_score: Whether to scale the BM25 scores or not.\n :return: The retrieved documents.\n\n :raises ValueError: If the specified DocumentStore is not found or is not a MemoryDocumentStore instance.\n \"\"\"\n if filters is None:\n filters = self.filters\n if top_k is None:\n top_k = self.top_k\n if scale_score is None:\n scale_score = self.scale_score\n\n docs = self.document_store.bm25_retrieval(query=query, filters=filters, top_k=top_k, scale_score=scale_score)\n return {\"documents\": docs}\n\n\n@component\nclass MemoryEmbeddingRetriever:\n \"\"\"\n A component for retrieving documents from a MemoryDocumentStore using a vector similarity metric.\n\n Needs to be connected to a MemoryDocumentStore to run.\n \"\"\"\n\n def __init__(\n self,\n document_store: MemoryDocumentStore,\n filters: Optional[Dict[str, Any]] = None,\n top_k: int = 10,\n scale_score: bool = True,\n return_embedding: bool = False,\n ):\n \"\"\"\n Create a MemoryEmbeddingRetriever component.\n\n :param document_store: An instance of MemoryDocumentStore.\n :param filters: A dictionary with filters to narrow down the search space. Default is None.\n :param top_k: The maximum number of documents to retrieve. Default is 10.\n :param scale_score: Whether to scale the scores of the retrieved documents or not. Default is True.\n :param return_embedding: Whether to return the embedding of the retrieved Documents. 
Default is False.\n\n :raises ValueError: If the specified top_k is not > 0.\n \"\"\"\n if not isinstance(document_store, MemoryDocumentStore):\n raise ValueError(\"document_store must be an instance of MemoryDocumentStore\")\n\n self.document_store = document_store\n\n if top_k <= 0:\n raise ValueError(f\"top_k must be > 0, but got {top_k}\")\n\n self.filters = filters\n self.top_k = top_k\n self.scale_score = scale_score\n self.return_embedding = return_embedding\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n \"\"\"\n docstore = self.document_store.to_dict()\n return default_to_dict(\n self,\n document_store=docstore,\n filters=self.filters,\n top_k=self.top_k,\n scale_score=self.scale_score,\n return_embedding=self.return_embedding,\n )\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"MemoryBM25Retriever\":\n \"\"\"\n Deserialize this component from a dictionary.\n \"\"\"\n init_params = data.get(\"init_parameters\", {})\n if \"document_store\" not in init_params:\n raise DeserializationError(\"Missing 'document_store' in serialization data\")\n if \"type\" not in init_params[\"document_store\"]:\n raise DeserializationError(\"Missing 'type' in document store's serialization data\")\n if init_params[\"document_store\"][\"type\"] not in document_store.registry:\n raise DeserializationError(f\"DocumentStore type '{init_params['document_store']['type']}' not found\")\n\n docstore_class = document_store.registry[init_params[\"document_store\"][\"type\"]]\n docstore = docstore_class.from_dict(init_params[\"document_store\"])\n data[\"init_parameters\"][\"document_store\"] = docstore\n return default_from_dict(cls, data)\n\n @component.output_types(documents=List[List[Document]])\n def run(\n self,\n queries_embeddings: List[List[float]],\n filters: Optional[Dict[str, Any]] = None,\n top_k: Optional[int] = None,\n scale_score: Optional[bool] = None,\n return_embedding: Optional[bool] = None,\n ):\n \"\"\"\n Run the MemoryEmbeddingRetriever on the given input data.\n\n :param queries_embeddings: Embeddings of the queries.\n :param filters: A dictionary with filters to narrow down the search space.\n :param top_k: The maximum number of documents to return.\n :param scale_score: Whether to scale the scores of the retrieved documents or not.\n :param return_embedding: Whether to return the embedding of the retrieved Documents.\n :return: The retrieved documents.\n\n :raises ValueError: If the specified DocumentStore is not found or is not a MemoryDocumentStore instance.\n \"\"\"\n if filters is None:\n filters = self.filters\n if top_k is None:\n top_k = self.top_k\n if scale_score is None:\n scale_score = self.scale_score\n if return_embedding is None:\n return_embedding = self.return_embedding\n\n docs = []\n for query_embedding in queries_embeddings:\n docs.append(\n self.document_store.embedding_retrieval(\n query_embedding=query_embedding,\n filters=filters,\n top_k=top_k,\n scale_score=scale_score,\n return_embedding=return_embedding,\n )\n )\n return {\"documents\": docs}\n", "path": "haystack/preview/components/retrievers/memory.py"}]} | 2,892 | 408 |
gh_patches_debug_16905 | rasdani/github-patches | git_diff | deeppavlov__DeepPavlov-101 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Downloading requirements
I was trying to install deeppavlov and had a problem following the installation steps.
1) There is no download.py file in root folder, it is in `deeppavlov/download.py`
``` sh
python download.py [-all]
```
2) Even if I use that file it outputs the error:
``` sh
(env) root@mysexyhost:~/work/ipavlov/DeepPavlov# python3 deeppavlov/download.py
/home/ubuntu/work/ipavlov/env/local/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
Using TensorFlow backend.
2018-03-12 07:34:11.490 ERROR in 'deeppavlov.core.models.serializable'['log'] at line 54: LOGGER ERROR: Can not initialise deeppavlov.core.models.serializable logger, logging to the stderr. Error traceback:
Traceback (most recent call last):
File "/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/common/log.py", line 32, in get_logger
with open(log_config_path) as log_config_json:
TypeError: invalid file: PosixPath('/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/log_config.json')
2018-03-12 07:34:11.491 ERROR in 'deeppavlov.core.models.keras_model'['log'] at line 54: LOGGER ERROR: Can not initialise deeppavlov.core.models.keras_model logger, logging to the stderr. Error traceback:
Traceback (most recent call last):
File "/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/common/log.py", line 32, in get_logger
with open(log_config_path) as log_config_json:
TypeError: invalid file: PosixPath('/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/log_config.json')
Traceback (most recent call last):
File "deeppavlov/download.py", line 24, in <module>
from deeppavlov.core.data.utils import download, download_decompress
File "/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/__init__.py", line 1, in <module>
import deeppavlov.core.models.keras_model
File "/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/models/keras_model.py", line 39, in <module>
class KerasModel(NNModel, metaclass=TfModelMeta):
File "/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/models/keras_model.py", line 143, in KerasModel
sample_weight_mode=None, weighted_metrics=None, target_tensors=None):
File "/home/ubuntu/work/ipavlov/env/local/lib/python3.5/site-packages/overrides/overrides.py", line 70, in overrides
method.__name__)
AssertionError: No super class method found for "load"
```
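
For context on the `TypeError: invalid file: PosixPath(...)` lines: on Python 3.5, `open()` does not yet accept `pathlib.Path` objects (`os.PathLike` support arrived in Python 3.6), so a `Path` has to be converted to `str` first. A small sketch (the path below is just an example):

```python
from pathlib import Path

log_config_path = Path('/home/user/DeepPavlov/deeppavlov/log_config.json')  # example path

# Python 3.5:  open(log_config_path) raises TypeError: invalid file: PosixPath(...)
# Python 3.6+: open(log_config_path) works thanks to os.PathLike (PEP 519)
portable = str(log_config_path)  # converting explicitly works on either version
```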
</issue>
<code>
[start of telegram_utils/telegram_ui.py]
1 """
2 Copyright 2017 Neural Networks and Deep Learning lab, MIPT
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15 """
16 import telebot
17
18 from deeppavlov.core.common.file import read_json
19 from deeppavlov.core.commands.infer import build_model_from_config
20
21
22 def init_bot_for_model(token, model):
23 bot = telebot.TeleBot(token)
24
25 model_name = type(model).__name__
26 models_info = read_json('../telegram_utils/models_info.json')
27 model_info = models_info[model_name] if model_name in models_info else models_info['@default']
28
29 @bot.message_handler(commands=['start'])
30 def send_start_message(message):
31 chat_id = message.chat.id
32 out_message = model_info['start_message']
33 if hasattr(model, 'reset'):
34 model.reset()
35 bot.send_message(chat_id, out_message)
36
37 @bot.message_handler(commands=['help'])
38 def send_help_message(message):
39 chat_id = message.chat.id
40 out_message = model_info['help_message']
41 bot.send_message(chat_id, out_message)
42
43 @bot.message_handler()
44 def handle_inference(message):
45 chat_id = message.chat.id
46 context = message.text
47
48 pred = model([context])
49 reply_message = str(pred[0])
50 bot.send_message(chat_id, reply_message)
51
52 bot.polling()
53
54
55 def interact_model_by_telegram(config_path, token):
56 config = read_json(config_path)
57 model = build_model_from_config(config)
58 init_bot_for_model(token, model)
59
[end of telegram_utils/telegram_ui.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/telegram_utils/telegram_ui.py b/telegram_utils/telegram_ui.py
--- a/telegram_utils/telegram_ui.py
+++ b/telegram_utils/telegram_ui.py
@@ -13,6 +13,8 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
+from pathlib import Path
+
import telebot
from deeppavlov.core.common.file import read_json
@@ -23,7 +25,8 @@
bot = telebot.TeleBot(token)
model_name = type(model).__name__
- models_info = read_json('../telegram_utils/models_info.json')
+ config_path = Path(__file__).parent / 'models_info.json'
+ models_info = read_json(str(config_path))
model_info = models_info[model_name] if model_name in models_info else models_info['@default']
@bot.message_handler(commands=['start'])
| {"golden_diff": "diff --git a/telegram_utils/telegram_ui.py b/telegram_utils/telegram_ui.py\n--- a/telegram_utils/telegram_ui.py\n+++ b/telegram_utils/telegram_ui.py\n@@ -13,6 +13,8 @@\n See the License for the specific language governing permissions and\n limitations under the License.\n \"\"\"\n+from pathlib import Path\n+\n import telebot\n \n from deeppavlov.core.common.file import read_json\n@@ -23,7 +25,8 @@\n bot = telebot.TeleBot(token)\n \n model_name = type(model).__name__\n- models_info = read_json('../telegram_utils/models_info.json')\n+ config_path = Path(__file__).parent / 'models_info.json'\n+ models_info = read_json(str(config_path))\n model_info = models_info[model_name] if model_name in models_info else models_info['@default']\n \n @bot.message_handler(commands=['start'])\n", "issue": "Downloading requirements\nI was trying to install deeppavlov and had a problem following the installation steps.\r\n\r\n1) There is no download.py file in root folder, it is in `deeppavlov/download.py`\r\n``` sh\r\npython download.py [-all] \r\n```\r\n\r\n2) Even if I use that file it outputs the error:\r\n``` sh\r\n(env) root@mysexyhost:~/work/ipavlov/DeepPavlov# python3 deeppavlov/download.py\r\n/home/ubuntu/work/ipavlov/env/local/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\r\n from ._conv import register_converters as _register_converters\r\nUsing TensorFlow backend.\r\n2018-03-12 07:34:11.490 ERROR in 'deeppavlov.core.models.serializable'['log'] at line 54: LOGGER ERROR: Can not initialise deeppavlov.core.models.serializable logger, logging to the stderr. Error traceback:\r\nTraceback (most recent call last):\r\n File \"/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/common/log.py\", line 32, in get_logger\r\n with open(log_config_path) as log_config_json:\r\nTypeError: invalid file: PosixPath('/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/log_config.json')\r\n2018-03-12 07:34:11.491 ERROR in 'deeppavlov.core.models.keras_model'['log'] at line 54: LOGGER ERROR: Can not initialise deeppavlov.core.models.keras_model logger, logging to the stderr. 
Error traceback:\r\nTraceback (most recent call last):\r\n File \"/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/common/log.py\", line 32, in get_logger\r\n with open(log_config_path) as log_config_json:\r\nTypeError: invalid file: PosixPath('/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/log_config.json')\r\nTraceback (most recent call last):\r\n File \"deeppavlov/download.py\", line 24, in <module>\r\n from deeppavlov.core.data.utils import download, download_decompress\r\n File \"/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/__init__.py\", line 1, in <module>\r\n import deeppavlov.core.models.keras_model\r\n File \"/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/models/keras_model.py\", line 39, in <module>\r\n class KerasModel(NNModel, metaclass=TfModelMeta):\r\n File \"/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/models/keras_model.py\", line 143, in KerasModel\r\n sample_weight_mode=None, weighted_metrics=None, target_tensors=None):\r\n File \"/home/ubuntu/work/ipavlov/env/local/lib/python3.5/site-packages/overrides/overrides.py\", line 70, in overrides\r\n method.__name__)\r\nAssertionError: No super class method found for \"load\"\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright 2017 Neural Networks and Deep Learning lab, MIPT\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport telebot\n\nfrom deeppavlov.core.common.file import read_json\nfrom deeppavlov.core.commands.infer import build_model_from_config\n\n\ndef init_bot_for_model(token, model):\n bot = telebot.TeleBot(token)\n\n model_name = type(model).__name__\n models_info = read_json('../telegram_utils/models_info.json')\n model_info = models_info[model_name] if model_name in models_info else models_info['@default']\n\n @bot.message_handler(commands=['start'])\n def send_start_message(message):\n chat_id = message.chat.id\n out_message = model_info['start_message']\n if hasattr(model, 'reset'):\n model.reset()\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler(commands=['help'])\n def send_help_message(message):\n chat_id = message.chat.id\n out_message = model_info['help_message']\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler()\n def handle_inference(message):\n chat_id = message.chat.id\n context = message.text\n\n pred = model([context])\n reply_message = str(pred[0])\n bot.send_message(chat_id, reply_message)\n\n bot.polling()\n\n\ndef interact_model_by_telegram(config_path, token):\n config = read_json(config_path)\n model = build_model_from_config(config)\n init_bot_for_model(token, model)\n", "path": "telegram_utils/telegram_ui.py"}]} | 1,806 | 196 |
gh_patches_debug_19897 | rasdani/github-patches | git_diff | ARM-DOE__ACT-756 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sunset Stamen maps in GeoDisplay and potentially replace
Stamen is transitioning their maps to Stadia Maps at the end of October 2023. ACT will need to deprecate that feature in GeoDisplay and potentially look for replacements.
https://github.com/SciTools/cartopy/pull/2266
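
One possible way to sunset the option without immediately breaking callers is a deprecation warning on use; this is only a sketch, and the helper name and wording are made up:

```python
import warnings

def _warn_stamen_deprecated(stamen):
    """Hypothetical helper: warn that the Stamen basemap option is going away."""
    if stamen is not None:
        warnings.warn(
            "The 'stamen' keyword is deprecated: Stamen tiles move to Stadia Maps "
            "after October 2023. Pass a tiler via img_tile/img_tile_args instead.",
            DeprecationWarning,
            stacklevel=2,
        )
```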
</issue>
<code>
[start of act/plotting/geodisplay.py]
1 """
2 Stores the class for GeographicPlotDisplay.
3
4 """
5
6 import warnings
7
8 import matplotlib
9 import matplotlib.pyplot as plt
10 import numpy as np
11 import pandas as pd
12
13 from .plot import Display
14
15 try:
16 import cartopy.crs as ccrs
17 import cartopy.feature as cfeature
18 from cartopy.io import img_tiles
19
20 CARTOPY_AVAILABLE = True
21 except ImportError:
22 CARTOPY_AVAILABLE = False
23
24
25 class GeographicPlotDisplay(Display):
26 """
27 A class for making geographic tracer plot of aircraft, ship or other moving
28 platform plot.
29
30 This is inherited from the :func:`act.plotting.Display`
31 class and has therefore has the same attributes as that class.
32 See :func:`act.plotting.Display`
33 for more information. There are no additional attributes or parameters
34 to this class.
35
36 In order to create geographic plots, ACT needs the Cartopy package to be
37 installed on your system. More information about
38 Cartopy go here:https://scitools.org.uk/cartopy/docs/latest/ .
39
40 """
41
42 def __init__(self, ds, ds_name=None, **kwargs):
43 if not CARTOPY_AVAILABLE:
44 raise ImportError(
45 'Cartopy needs to be installed on your ' 'system to make geographic display plots.'
46 )
47 super().__init__(ds, ds_name, secondary_y_allowed=False, **kwargs)
48 if self.fig is None:
49 self.fig = plt.figure(**kwargs)
50
51 def geoplot(
52 self,
53 data_field=None,
54 lat_field='lat',
55 lon_field='lon',
56 dsname=None,
57 cbar_label=None,
58 title=None,
59 projection=None,
60 plot_buffer=0.08,
61 img_tile=None,
62 img_tile_args={},
63 tile=8,
64 stamen='terrain-background',
65 cartopy_feature=None,
66 cmap='rainbow',
67 text=None,
68 gridlines=True,
69 **kwargs,
70 ):
71 """
72 Creates a latitude and longitude plot of a time series data set with
73 data values indicated by color and described with a colorbar.
74 Latitude values must be in degree north (-90 to 90) and
75 longitude must be in degree east (-180 to 180).
76
77 Parameters
78 ----------
79 data_field : str
80 Name of data field in the dataset to plot.
81 lat_field : str
82 Name of latitude field in the dataset to use.
83 lon_field : str
84 Name of longitude field in the dataset to use.
85 dsname : str or None
86 The name of the datastream to plot. Set to None to make ACT
87 attempt to automatically determine this.
88 cbar_label : str
89 Label to use with colorbar. If set to None will attempt
90 to create label from long_name and units.
91 title : str
92 Plot title.
93 projection : cartopy.crs object
94 Project to use on plot. See
95 https://scitools.org.uk/cartopy/docs/latest/reference/projections.html?highlight=projections
96 plot_buffer : float
97 Buffer to add around data on plot in lat and lon dimension.
98 img_tile : str
99 Image to use for the plot background. Set to None to not use
100 background image. For all image background types, see:
101 https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html
102 Default is None.
103 img_tile_args : dict
104 Keyword arguments for the chosen img_tile. These arguments can be
105 found for the corresponding img_tile here:
106 https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html
107 Default is an empty dictionary.
108 tile : int
109 Tile zoom to use with background image. Higher number indicates
110 more resolution. A value of 8 is typical for a normal sonde plot.
111 cartopy_feature : list of str or str
112 Cartopy feature to add to plot.
113 cmap : str
114 Color map to use for colorbar.
115 text : dictionary
116 Dictionary of {text:[lon,lat]} to add to plot. Can have more
117 than one set of text to add.
118 gridlines : boolean
119 Use latitude and longitude gridlines.
120 **kwargs : keyword arguments
121 Any other keyword arguments that will be passed
122 into :func:`matplotlib.pyplot.scatter` when the figure
123 is made. See the matplotlib documentation for further details
124 on what keyword arguments are available.
125
126 """
127 if dsname is None and len(self._ds.keys()) > 1:
128 raise ValueError(
129 'You must choose a datastream when there are 2 '
130 'or more datasets in the GeographicPlotDisplay '
131 'object.'
132 )
133 elif dsname is None:
134 dsname = list(self._ds.keys())[0]
135
136 if data_field is None:
137 raise ValueError('You must enter the name of the data ' 'to be plotted.')
138
139 if projection is None:
140 if CARTOPY_AVAILABLE:
141 projection = ccrs.PlateCarree()
142
143 # Extract data from the dataset
144 try:
145 lat = self._ds[dsname][lat_field].values
146 except KeyError:
147 raise ValueError(
148 (
149 'You will need to provide the name of the '
150 "field if not '{}' to use for latitude "
151 'data.'
152 ).format(lat_field)
153 )
154 try:
155 lon = self._ds[dsname][lon_field].values
156 except KeyError:
157 raise ValueError(
158 (
159 'You will need to provide the name of the '
160 "field if not '{}' to use for longitude "
161 'data.'
162 ).format(lon_field)
163 )
164
165 # Set up metadata information for display on plot
166 if cbar_label is None:
167 try:
168 cbar_label = (
169 self._ds[dsname][data_field].attrs['long_name']
170 + ' ('
171 + self._ds[dsname][data_field].attrs['units']
172 + ')'
173 )
174 except KeyError:
175 cbar_label = data_field
176
177 lat_limits = [np.nanmin(lat), np.nanmax(lat)]
178 lon_limits = [np.nanmin(lon), np.nanmax(lon)]
179 box_size = np.max([np.abs(np.diff(lat_limits)), np.abs(np.diff(lon_limits))])
180 bx_buf = box_size * plot_buffer
181
182 lat_center = np.sum(lat_limits) / 2.0
183 lon_center = np.sum(lon_limits) / 2.0
184
185 lat_limits = [
186 lat_center - box_size / 2.0 - bx_buf,
187 lat_center + box_size / 2.0 + bx_buf,
188 ]
189 lon_limits = [
190 lon_center - box_size / 2.0 - bx_buf,
191 lon_center + box_size / 2.0 + bx_buf,
192 ]
193
194 data = self._ds[dsname][data_field].values
195
196 # Create base plot projection
197 ax = plt.axes(projection=projection)
198 plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93)
199 ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0], lat_limits[1]], crs=projection)
200
201 if title is None:
202 try:
203 dim = list(self._ds[dsname][data_field].dims)
204 ts = pd.to_datetime(str(self._ds[dsname][dim[0]].values[0]))
205 date = ts.strftime('%Y-%m-%d')
206 time_str = ts.strftime('%H:%M:%S')
207 plt.title(' '.join([dsname, 'at', date, time_str]))
208 except NameError:
209 plt.title(dsname)
210 else:
211 plt.title(title)
212
213 if stamen and img_tile is None:
214 tiler = img_tiles.Stamen(stamen)
215 ax.add_image(tiler, tile)
216 warnings.warn(
217 "Stamen is deprecated in Cartopy and in future versions of ACT, "
218 "please use img_tile to specify the image background. ")
219 else:
220 if img_tile is not None:
221 tiler = getattr(img_tiles, img_tile)(**img_tile_args)
222 ax.add_image(tiler, tile)
223
224 colorbar_map = None
225 if cmap is not None:
226 colorbar_map = matplotlib.colormaps.get_cmap(cmap)
227 sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs)
228 cbar = plt.colorbar(sc)
229 cbar.ax.set_ylabel(cbar_label)
230 if cartopy_feature is not None:
231 if isinstance(cartopy_feature, str):
232 cartopy_feature = [cartopy_feature]
233 cartopy_feature = [ii.upper() for ii in cartopy_feature]
234 if 'STATES' in cartopy_feature:
235 ax.add_feature(cfeature.STATES.with_scale('10m'))
236 if 'LAND' in cartopy_feature:
237 ax.add_feature(cfeature.LAND)
238 if 'OCEAN' in cartopy_feature:
239 ax.add_feature(cfeature.OCEAN)
240 if 'COASTLINE' in cartopy_feature:
241 ax.add_feature(cfeature.COASTLINE)
242 if 'BORDERS' in cartopy_feature:
243 ax.add_feature(cfeature.BORDERS, linestyle=':')
244 if 'LAKES' in cartopy_feature:
245 ax.add_feature(cfeature.LAKES, alpha=0.5)
246 if 'RIVERS' in cartopy_feature:
247 ax.add_feature(cfeature.RIVERS)
248 if text is not None:
249 for label, location in text.items():
250 ax.plot(location[0], location[1], marker='*', color='black')
251 ax.text(location[0], location[1], label, color='black')
252
253 if gridlines:
254 if projection == ccrs.PlateCarree() or projection == ccrs.Mercator:
255 gl = ax.gridlines(
256 crs=projection,
257 draw_labels=True,
258 linewidth=1,
259 color='gray',
260 alpha=0.5,
261 linestyle='--',
262 )
263 gl.top_labels = False
264 gl.left_labels = True
265 gl.bottom_labels = True
266 gl.right_labels = False
267 gl.xlabel_style = {'size': 6, 'color': 'gray'}
268 gl.ylabel_style = {'size': 6, 'color': 'gray'}
269 else:
270 # Labels are only currently supported for PlateCarree and Mercator
271 gl = ax.gridlines(
272 draw_labels=False,
273 linewidth=1,
274 color='gray',
275 alpha=0.5,
276 linestyle='--',
277 )
278
279 return ax
280
[end of act/plotting/geodisplay.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

diff --git a/act/plotting/geodisplay.py b/act/plotting/geodisplay.py
--- a/act/plotting/geodisplay.py
+++ b/act/plotting/geodisplay.py
@@ -61,7 +61,6 @@
img_tile=None,
img_tile_args={},
tile=8,
- stamen='terrain-background',
cartopy_feature=None,
cmap='rainbow',
text=None,
@@ -210,16 +209,9 @@
else:
plt.title(title)
- if stamen and img_tile is None:
- tiler = img_tiles.Stamen(stamen)
+ if img_tile is not None:
+ tiler = getattr(img_tiles, img_tile)(**img_tile_args)
ax.add_image(tiler, tile)
- warnings.warn(
- "Stamen is deprecated in Cartopy and in future versions of ACT, "
- "please use img_tile to specify the image background. ")
- else:
- if img_tile is not None:
- tiler = getattr(img_tiles, img_tile)(**img_tile_args)
- ax.add_image(tiler, tile)
colorbar_map = None
if cmap is not None:
| {"golden_diff": "diff --git a/act/plotting/geodisplay.py b/act/plotting/geodisplay.py\n--- a/act/plotting/geodisplay.py\n+++ b/act/plotting/geodisplay.py\n@@ -61,7 +61,6 @@\n img_tile=None,\n img_tile_args={},\n tile=8,\n- stamen='terrain-background',\n cartopy_feature=None,\n cmap='rainbow',\n text=None,\n@@ -210,16 +209,9 @@\n else:\n plt.title(title)\n \n- if stamen and img_tile is None:\n- tiler = img_tiles.Stamen(stamen)\n+ if img_tile is not None:\n+ tiler = getattr(img_tiles, img_tile)(**img_tile_args)\n ax.add_image(tiler, tile)\n- warnings.warn(\n- \"Stamen is deprecated in Cartopy and in future versions of ACT, \"\n- \"please use img_tile to specify the image background. \")\n- else:\n- if img_tile is not None:\n- tiler = getattr(img_tiles, img_tile)(**img_tile_args)\n- ax.add_image(tiler, tile)\n \n colorbar_map = None\n if cmap is not None:\n", "issue": "Sunset Stamen maps in GeoDisplay and potentially replace\nStamen is transitioning their maps to stadia at the end of October 2023. ACT will need to deprecate that feature in GeoDisplay and potentially look for replacements.\r\n\r\nhttps://github.com/SciTools/cartopy/pull/2266 \n", "before_files": [{"content": "\"\"\"\nStores the class for GeographicPlotDisplay.\n\n\"\"\"\n\nimport warnings\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom .plot import Display\n\ntry:\n import cartopy.crs as ccrs\n import cartopy.feature as cfeature\n from cartopy.io import img_tiles\n\n CARTOPY_AVAILABLE = True\nexcept ImportError:\n CARTOPY_AVAILABLE = False\n\n\nclass GeographicPlotDisplay(Display):\n \"\"\"\n A class for making geographic tracer plot of aircraft, ship or other moving\n platform plot.\n\n This is inherited from the :func:`act.plotting.Display`\n class and has therefore has the same attributes as that class.\n See :func:`act.plotting.Display`\n for more information. There are no additional attributes or parameters\n to this class.\n\n In order to create geographic plots, ACT needs the Cartopy package to be\n installed on your system. More information about\n Cartopy go here:https://scitools.org.uk/cartopy/docs/latest/ .\n\n \"\"\"\n\n def __init__(self, ds, ds_name=None, **kwargs):\n if not CARTOPY_AVAILABLE:\n raise ImportError(\n 'Cartopy needs to be installed on your ' 'system to make geographic display plots.'\n )\n super().__init__(ds, ds_name, secondary_y_allowed=False, **kwargs)\n if self.fig is None:\n self.fig = plt.figure(**kwargs)\n\n def geoplot(\n self,\n data_field=None,\n lat_field='lat',\n lon_field='lon',\n dsname=None,\n cbar_label=None,\n title=None,\n projection=None,\n plot_buffer=0.08,\n img_tile=None,\n img_tile_args={},\n tile=8,\n stamen='terrain-background',\n cartopy_feature=None,\n cmap='rainbow',\n text=None,\n gridlines=True,\n **kwargs,\n ):\n \"\"\"\n Creates a latitude and longitude plot of a time series data set with\n data values indicated by color and described with a colorbar.\n Latitude values must be in degree north (-90 to 90) and\n longitude must be in degree east (-180 to 180).\n\n Parameters\n ----------\n data_field : str\n Name of data field in the dataset to plot.\n lat_field : str\n Name of latitude field in the dataset to use.\n lon_field : str\n Name of longitude field in the dataset to use.\n dsname : str or None\n The name of the datastream to plot. Set to None to make ACT\n attempt to automatically determine this.\n cbar_label : str\n Label to use with colorbar. 
If set to None will attempt\n to create label from long_name and units.\n title : str\n Plot title.\n projection : cartopy.crs object\n Project to use on plot. See\n https://scitools.org.uk/cartopy/docs/latest/reference/projections.html?highlight=projections\n plot_buffer : float\n Buffer to add around data on plot in lat and lon dimension.\n img_tile : str\n Image to use for the plot background. Set to None to not use\n background image. For all image background types, see:\n https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html\n Default is None.\n img_tile_args : dict\n Keyword arguments for the chosen img_tile. These arguments can be\n found for the corresponding img_tile here:\n https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html\n Default is an empty dictionary.\n tile : int\n Tile zoom to use with background image. Higher number indicates\n more resolution. A value of 8 is typical for a normal sonde plot.\n cartopy_feature : list of str or str\n Cartopy feature to add to plot.\n cmap : str\n Color map to use for colorbar.\n text : dictionary\n Dictionary of {text:[lon,lat]} to add to plot. Can have more\n than one set of text to add.\n gridlines : boolean\n Use latitude and longitude gridlines.\n **kwargs : keyword arguments\n Any other keyword arguments that will be passed\n into :func:`matplotlib.pyplot.scatter` when the figure\n is made. See the matplotlib documentation for further details\n on what keyword arguments are available.\n\n \"\"\"\n if dsname is None and len(self._ds.keys()) > 1:\n raise ValueError(\n 'You must choose a datastream when there are 2 '\n 'or more datasets in the GeographicPlotDisplay '\n 'object.'\n )\n elif dsname is None:\n dsname = list(self._ds.keys())[0]\n\n if data_field is None:\n raise ValueError('You must enter the name of the data ' 'to be plotted.')\n\n if projection is None:\n if CARTOPY_AVAILABLE:\n projection = ccrs.PlateCarree()\n\n # Extract data from the dataset\n try:\n lat = self._ds[dsname][lat_field].values\n except KeyError:\n raise ValueError(\n (\n 'You will need to provide the name of the '\n \"field if not '{}' to use for latitude \"\n 'data.'\n ).format(lat_field)\n )\n try:\n lon = self._ds[dsname][lon_field].values\n except KeyError:\n raise ValueError(\n (\n 'You will need to provide the name of the '\n \"field if not '{}' to use for longitude \"\n 'data.'\n ).format(lon_field)\n )\n\n # Set up metadata information for display on plot\n if cbar_label is None:\n try:\n cbar_label = (\n self._ds[dsname][data_field].attrs['long_name']\n + ' ('\n + self._ds[dsname][data_field].attrs['units']\n + ')'\n )\n except KeyError:\n cbar_label = data_field\n\n lat_limits = [np.nanmin(lat), np.nanmax(lat)]\n lon_limits = [np.nanmin(lon), np.nanmax(lon)]\n box_size = np.max([np.abs(np.diff(lat_limits)), np.abs(np.diff(lon_limits))])\n bx_buf = box_size * plot_buffer\n\n lat_center = np.sum(lat_limits) / 2.0\n lon_center = np.sum(lon_limits) / 2.0\n\n lat_limits = [\n lat_center - box_size / 2.0 - bx_buf,\n lat_center + box_size / 2.0 + bx_buf,\n ]\n lon_limits = [\n lon_center - box_size / 2.0 - bx_buf,\n lon_center + box_size / 2.0 + bx_buf,\n ]\n\n data = self._ds[dsname][data_field].values\n\n # Create base plot projection\n ax = plt.axes(projection=projection)\n plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93)\n ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0], lat_limits[1]], crs=projection)\n\n if title is None:\n try:\n dim = list(self._ds[dsname][data_field].dims)\n ts = 
pd.to_datetime(str(self._ds[dsname][dim[0]].values[0]))\n date = ts.strftime('%Y-%m-%d')\n time_str = ts.strftime('%H:%M:%S')\n plt.title(' '.join([dsname, 'at', date, time_str]))\n except NameError:\n plt.title(dsname)\n else:\n plt.title(title)\n\n if stamen and img_tile is None:\n tiler = img_tiles.Stamen(stamen)\n ax.add_image(tiler, tile)\n warnings.warn(\n \"Stamen is deprecated in Cartopy and in future versions of ACT, \"\n \"please use img_tile to specify the image background. \")\n else:\n if img_tile is not None:\n tiler = getattr(img_tiles, img_tile)(**img_tile_args)\n ax.add_image(tiler, tile)\n\n colorbar_map = None\n if cmap is not None:\n colorbar_map = matplotlib.colormaps.get_cmap(cmap)\n sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs)\n cbar = plt.colorbar(sc)\n cbar.ax.set_ylabel(cbar_label)\n if cartopy_feature is not None:\n if isinstance(cartopy_feature, str):\n cartopy_feature = [cartopy_feature]\n cartopy_feature = [ii.upper() for ii in cartopy_feature]\n if 'STATES' in cartopy_feature:\n ax.add_feature(cfeature.STATES.with_scale('10m'))\n if 'LAND' in cartopy_feature:\n ax.add_feature(cfeature.LAND)\n if 'OCEAN' in cartopy_feature:\n ax.add_feature(cfeature.OCEAN)\n if 'COASTLINE' in cartopy_feature:\n ax.add_feature(cfeature.COASTLINE)\n if 'BORDERS' in cartopy_feature:\n ax.add_feature(cfeature.BORDERS, linestyle=':')\n if 'LAKES' in cartopy_feature:\n ax.add_feature(cfeature.LAKES, alpha=0.5)\n if 'RIVERS' in cartopy_feature:\n ax.add_feature(cfeature.RIVERS)\n if text is not None:\n for label, location in text.items():\n ax.plot(location[0], location[1], marker='*', color='black')\n ax.text(location[0], location[1], label, color='black')\n\n if gridlines:\n if projection == ccrs.PlateCarree() or projection == ccrs.Mercator:\n gl = ax.gridlines(\n crs=projection,\n draw_labels=True,\n linewidth=1,\n color='gray',\n alpha=0.5,\n linestyle='--',\n )\n gl.top_labels = False\n gl.left_labels = True\n gl.bottom_labels = True\n gl.right_labels = False\n gl.xlabel_style = {'size': 6, 'color': 'gray'}\n gl.ylabel_style = {'size': 6, 'color': 'gray'}\n else:\n # Labels are only currently supported for PlateCarree and Mercator\n gl = ax.gridlines(\n draw_labels=False,\n linewidth=1,\n color='gray',\n alpha=0.5,\n linestyle='--',\n )\n\n return ax\n", "path": "act/plotting/geodisplay.py"}]} | 3,635 | 274 |
gh_patches_debug_38229 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-1018 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
elasticsearch span_name is not generated correctly
**Steps to reproduce**
Describe exactly how to reproduce the error. Include a code sample if applicable.
**What is the expected behavior?**
Our index is created on a rolling basis
Therefore, when querying we target the indices for a specific time range, and when the plugin generates span_name it includes the full comma-separated list of index names, so span_name can no longer be read as a single operation. For example:
```
Elasticsearch/bkfta_action_20220228_read,bkfta_action_20220301_read,bkfta_action_20220302_read,bkfta_action_20220303_read,bkfta_action_20220304_read,bkfta_action_20220305_read,bkfta_action_20220306_read,bkfta_action_20220307_read/_search
```
**What is the actual behavior?**
span_name should be
```
Elasticsearch/{index_name}/_search
```
**Additional context**
Add any other context about the problem here.
</issue>
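
The shape of the fix (shown in the diff at the end of this entry) is to collapse whatever index list precedes `_search` into a fixed placeholder so the span name stays bounded, and to keep the concrete indices as an attribute. A small sketch of that normalisation — the regex mirrors the patch below, while the `Elasticsearch` prefix and the helper name are only illustrative:

```python
# Illustrative sketch only; the real instrumentation wraps Transport.perform_request.
import re

_regex_search_url = re.compile(r"/([^/]+)/_search[/]?")


def normalize_span_name(url, prefix="Elasticsearch"):
    match = _regex_search_url.search(url)
    if match is None:
        return prefix + url, None
    # Keep the comma-separated index list out of the span name; expose it separately.
    return prefix + "/<target>/_search", match.group(1)


name, target = normalize_span_name(
    "/bkfta_action_20220228_read,bkfta_action_20220301_read/_search"
)
# name   -> "Elasticsearch/<target>/_search"
# target -> "bkfta_action_20220228_read,bkfta_action_20220301_read"
```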
<code>
[start of instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 This library allows tracing HTTP elasticsearch made by the
17 `elasticsearch <https://elasticsearch-py.readthedocs.io/en/master/>`_ library.
18
19 Usage
20 -----
21
22 .. code-block:: python
23
24 from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor
25 import elasticsearch
26
27
28 # instrument elasticsearch
29 ElasticsearchInstrumentor().instrument()
30
31 # Using elasticsearch as normal now will automatically generate spans
32 es = elasticsearch.Elasticsearch()
33 es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})
34 es.get(index='my-index', doc_type='my-type', id=1)
35
36 Elasticsearch instrumentation prefixes operation names with the string "Elasticsearch". This
37 can be changed to a different string by either setting the `OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX`
38 environment variable or by passing the prefix as an argument to the instrumentor. For example,
39
40
41 .. code-block:: python
42
43 ElasticsearchInstrumentor("my-custom-prefix").instrument()
44
45
46 The `instrument` method accepts the following keyword args:
47
48 tracer_provider (TracerProvider) - an optional tracer provider
49 request_hook (Callable) - a function with extra user-defined logic to be performed before performing the request
50 this function signature is:
51 def request_hook(span: Span, method: str, url: str, kwargs)
52 response_hook (Callable) - a function with extra user-defined logic to be performed after performing the request
53 this function signature is:
54 def response_hook(span: Span, response: dict)
55
56 for example:
57
58 .. code: python
59
60 from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor
61 import elasticsearch
62
63 def request_hook(span, method, url, kwargs):
64 if span and span.is_recording():
65 span.set_attribute("custom_user_attribute_from_request_hook", "some-value")
66
67 def response_hook(span, response):
68 if span and span.is_recording():
69 span.set_attribute("custom_user_attribute_from_response_hook", "some-value")
70
71 # instrument elasticsearch with request and response hooks
72 ElasticsearchInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook)
73
74 # Using elasticsearch as normal now will automatically generate spans,
75 # including user custom attributes added from the hooks
76 es = elasticsearch.Elasticsearch()
77 es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})
78 es.get(index='my-index', doc_type='my-type', id=1)
79
80 API
81 ---
82 """
83
84 import re
85 from logging import getLogger
86 from os import environ
87 from typing import Collection
88
89 import elasticsearch
90 import elasticsearch.exceptions
91 from wrapt import wrap_function_wrapper as _wrap
92
93 from opentelemetry.instrumentation.elasticsearch.package import _instruments
94 from opentelemetry.instrumentation.elasticsearch.version import __version__
95 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
96 from opentelemetry.instrumentation.utils import unwrap
97 from opentelemetry.semconv.trace import SpanAttributes
98 from opentelemetry.trace import SpanKind, get_tracer
99
100 logger = getLogger(__name__)
101
102
103 # Values to add as tags from the actual
104 # payload returned by Elasticsearch, if any.
105 _ATTRIBUTES_FROM_RESULT = [
106 "found",
107 "timed_out",
108 "took",
109 ]
110
111 _DEFAULT_OP_NAME = "request"
112
113
114 class ElasticsearchInstrumentor(BaseInstrumentor):
115 """An instrumentor for elasticsearch
116 See `BaseInstrumentor`
117 """
118
119 def __init__(self, span_name_prefix=None):
120 if not span_name_prefix:
121 span_name_prefix = environ.get(
122 "OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX",
123 "Elasticsearch",
124 )
125 self._span_name_prefix = span_name_prefix.strip()
126 super().__init__()
127
128 def instrumentation_dependencies(self) -> Collection[str]:
129 return _instruments
130
131 def _instrument(self, **kwargs):
132 """
133 Instruments elasticsarch module
134 """
135 tracer_provider = kwargs.get("tracer_provider")
136 tracer = get_tracer(__name__, __version__, tracer_provider)
137 request_hook = kwargs.get("request_hook")
138 response_hook = kwargs.get("response_hook")
139 _wrap(
140 elasticsearch,
141 "Transport.perform_request",
142 _wrap_perform_request(
143 tracer, self._span_name_prefix, request_hook, response_hook
144 ),
145 )
146
147 def _uninstrument(self, **kwargs):
148 unwrap(elasticsearch.Transport, "perform_request")
149
150
151 _regex_doc_url = re.compile(r"/_doc/([^/]+)")
152
153
154 def _wrap_perform_request(
155 tracer, span_name_prefix, request_hook=None, response_hook=None
156 ):
157 # pylint: disable=R0912
158 def wrapper(wrapped, _, args, kwargs):
159 method = url = None
160 try:
161 method, url, *_ = args
162 except IndexError:
163 logger.warning(
164 "expected perform_request to receive two positional arguments. "
165 "Got %d",
166 len(args),
167 )
168
169 op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)
170 doc_id = None
171 if url:
172 # TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()
173 # A limitation of the regex is that only the '_doc' mapping type is supported. Mapping types are deprecated since Elasticsearch 7
174 # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/708
175 match = _regex_doc_url.search(url)
176 if match is not None:
177 # Remove the full document ID from the URL
178 doc_span = match.span()
179 op_name = (
180 span_name_prefix
181 + url[: doc_span[0]]
182 + "/_doc/:id"
183 + url[doc_span[1] :]
184 )
185 # Put the document ID in attributes
186 doc_id = match.group(1)
187 params = kwargs.get("params", {})
188 body = kwargs.get("body", None)
189
190 with tracer.start_as_current_span(
191 op_name,
192 kind=SpanKind.CLIENT,
193 ) as span:
194
195 if callable(request_hook):
196 request_hook(span, method, url, kwargs)
197
198 if span.is_recording():
199 attributes = {
200 SpanAttributes.DB_SYSTEM: "elasticsearch",
201 }
202 if url:
203 attributes["elasticsearch.url"] = url
204 if method:
205 attributes["elasticsearch.method"] = method
206 if body:
207 attributes[SpanAttributes.DB_STATEMENT] = str(body)
208 if params:
209 attributes["elasticsearch.params"] = str(params)
210 if doc_id:
211 attributes["elasticsearch.id"] = doc_id
212 for key, value in attributes.items():
213 span.set_attribute(key, value)
214
215 rv = wrapped(*args, **kwargs)
216 if isinstance(rv, dict) and span.is_recording():
217 for member in _ATTRIBUTES_FROM_RESULT:
218 if member in rv:
219 span.set_attribute(
220 f"elasticsearch.{member}",
221 str(rv[member]),
222 )
223
224 if callable(response_hook):
225 response_hook(span, rv)
226 return rv
227
228 return wrapper
229
[end of instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
@@ -150,11 +150,14 @@
_regex_doc_url = re.compile(r"/_doc/([^/]+)")
+# search api https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html
+_regex_search_url = re.compile(r"/([^/]+)/_search[/]?")
+
def _wrap_perform_request(
tracer, span_name_prefix, request_hook=None, response_hook=None
):
- # pylint: disable=R0912
+ # pylint: disable=R0912,R0914
def wrapper(wrapped, _, args, kwargs):
method = url = None
try:
@@ -167,7 +170,10 @@
)
op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)
+
doc_id = None
+ search_target = None
+
if url:
# TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()
# A limitation of the regex is that only the '_doc' mapping type is supported. Mapping types are deprecated since Elasticsearch 7
@@ -184,6 +190,11 @@
)
# Put the document ID in attributes
doc_id = match.group(1)
+ match = _regex_search_url.search(url)
+ if match is not None:
+ op_name = span_name_prefix + "/<target>/_search"
+ search_target = match.group(1)
+
params = kwargs.get("params", {})
body = kwargs.get("body", None)
@@ -209,6 +220,8 @@
attributes["elasticsearch.params"] = str(params)
if doc_id:
attributes["elasticsearch.id"] = doc_id
+ if search_target:
+ attributes["elasticsearch.target"] = search_target
for key, value in attributes.items():
span.set_attribute(key, value)
| {"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n@@ -150,11 +150,14 @@\n \n _regex_doc_url = re.compile(r\"/_doc/([^/]+)\")\n \n+# search api https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html\n+_regex_search_url = re.compile(r\"/([^/]+)/_search[/]?\")\n+\n \n def _wrap_perform_request(\n tracer, span_name_prefix, request_hook=None, response_hook=None\n ):\n- # pylint: disable=R0912\n+ # pylint: disable=R0912,R0914\n def wrapper(wrapped, _, args, kwargs):\n method = url = None\n try:\n@@ -167,7 +170,10 @@\n )\n \n op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)\n+\n doc_id = None\n+ search_target = None\n+\n if url:\n # TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()\n # A limitation of the regex is that only the '_doc' mapping type is supported. Mapping types are deprecated since Elasticsearch 7\n@@ -184,6 +190,11 @@\n )\n # Put the document ID in attributes\n doc_id = match.group(1)\n+ match = _regex_search_url.search(url)\n+ if match is not None:\n+ op_name = span_name_prefix + \"/<target>/_search\"\n+ search_target = match.group(1)\n+\n params = kwargs.get(\"params\", {})\n body = kwargs.get(\"body\", None)\n \n@@ -209,6 +220,8 @@\n attributes[\"elasticsearch.params\"] = str(params)\n if doc_id:\n attributes[\"elasticsearch.id\"] = doc_id\n+ if search_target:\n+ attributes[\"elasticsearch.target\"] = search_target\n for key, value in attributes.items():\n span.set_attribute(key, value)\n", "issue": "elasticsearch span_name not correctly\n\r\n**Steps to reproduce**\r\nDescribe exactly how to reproduce the error. Include a code sample if applicable.\r\n\r\n**What is the expected behavior?**\r\nOur index is created on a rolling basis\r\nTherefore, we will use a specific time to query the index when querying, and when the plugin generates span_name, it will bring the unified index, so that span_name cannot be described as an operation. 
As follows\r\n```\r\nElasticsearch/bkfta_action_20220228_read,bkfta_action_20220301_read,bkfta_action_20220302_read,bkfta_action_20220303_read,bkfta_action_20220304_read,bkfta_action_20220305_read,bkfta_action_20220306_read,bkfta_action_20220307_read/_search\r\n```\r\n**What is the actual behavior?**\r\nspan_name shuld be\r\n```\r\nElasticsearch/{index_name}/_search\r\n```\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows tracing HTTP elasticsearch made by the\n`elasticsearch <https://elasticsearch-py.readthedocs.io/en/master/>`_ library.\n\nUsage\n-----\n\n.. code-block:: python\n\n from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor\n import elasticsearch\n\n\n # instrument elasticsearch\n ElasticsearchInstrumentor().instrument()\n\n # Using elasticsearch as normal now will automatically generate spans\n es = elasticsearch.Elasticsearch()\n es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})\n es.get(index='my-index', doc_type='my-type', id=1)\n\nElasticsearch instrumentation prefixes operation names with the string \"Elasticsearch\". This\ncan be changed to a different string by either setting the `OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX`\nenvironment variable or by passing the prefix as an argument to the instrumentor. For example,\n\n\n.. code-block:: python\n\n ElasticsearchInstrumentor(\"my-custom-prefix\").instrument()\n\n\nThe `instrument` method accepts the following keyword args:\n\ntracer_provider (TracerProvider) - an optional tracer provider\nrequest_hook (Callable) - a function with extra user-defined logic to be performed before performing the request\n this function signature is:\n def request_hook(span: Span, method: str, url: str, kwargs)\nresponse_hook (Callable) - a function with extra user-defined logic to be performed after performing the request\n this function signature is:\n def response_hook(span: Span, response: dict)\n\nfor example:\n\n.. 
code: python\n\n from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor\n import elasticsearch\n\n def request_hook(span, method, url, kwargs):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_request_hook\", \"some-value\")\n\n def response_hook(span, response):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_response_hook\", \"some-value\")\n\n # instrument elasticsearch with request and response hooks\n ElasticsearchInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook)\n\n # Using elasticsearch as normal now will automatically generate spans,\n # including user custom attributes added from the hooks\n es = elasticsearch.Elasticsearch()\n es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})\n es.get(index='my-index', doc_type='my-type', id=1)\n\nAPI\n---\n\"\"\"\n\nimport re\nfrom logging import getLogger\nfrom os import environ\nfrom typing import Collection\n\nimport elasticsearch\nimport elasticsearch.exceptions\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry.instrumentation.elasticsearch.package import _instruments\nfrom opentelemetry.instrumentation.elasticsearch.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace import SpanKind, get_tracer\n\nlogger = getLogger(__name__)\n\n\n# Values to add as tags from the actual\n# payload returned by Elasticsearch, if any.\n_ATTRIBUTES_FROM_RESULT = [\n \"found\",\n \"timed_out\",\n \"took\",\n]\n\n_DEFAULT_OP_NAME = \"request\"\n\n\nclass ElasticsearchInstrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for elasticsearch\n See `BaseInstrumentor`\n \"\"\"\n\n def __init__(self, span_name_prefix=None):\n if not span_name_prefix:\n span_name_prefix = environ.get(\n \"OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX\",\n \"Elasticsearch\",\n )\n self._span_name_prefix = span_name_prefix.strip()\n super().__init__()\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n \"\"\"\n Instruments elasticsarch module\n \"\"\"\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = get_tracer(__name__, __version__, tracer_provider)\n request_hook = kwargs.get(\"request_hook\")\n response_hook = kwargs.get(\"response_hook\")\n _wrap(\n elasticsearch,\n \"Transport.perform_request\",\n _wrap_perform_request(\n tracer, self._span_name_prefix, request_hook, response_hook\n ),\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(elasticsearch.Transport, \"perform_request\")\n\n\n_regex_doc_url = re.compile(r\"/_doc/([^/]+)\")\n\n\ndef _wrap_perform_request(\n tracer, span_name_prefix, request_hook=None, response_hook=None\n):\n # pylint: disable=R0912\n def wrapper(wrapped, _, args, kwargs):\n method = url = None\n try:\n method, url, *_ = args\n except IndexError:\n logger.warning(\n \"expected perform_request to receive two positional arguments. 
\"\n \"Got %d\",\n len(args),\n )\n\n op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)\n doc_id = None\n if url:\n # TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()\n # A limitation of the regex is that only the '_doc' mapping type is supported. Mapping types are deprecated since Elasticsearch 7\n # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/708\n match = _regex_doc_url.search(url)\n if match is not None:\n # Remove the full document ID from the URL\n doc_span = match.span()\n op_name = (\n span_name_prefix\n + url[: doc_span[0]]\n + \"/_doc/:id\"\n + url[doc_span[1] :]\n )\n # Put the document ID in attributes\n doc_id = match.group(1)\n params = kwargs.get(\"params\", {})\n body = kwargs.get(\"body\", None)\n\n with tracer.start_as_current_span(\n op_name,\n kind=SpanKind.CLIENT,\n ) as span:\n\n if callable(request_hook):\n request_hook(span, method, url, kwargs)\n\n if span.is_recording():\n attributes = {\n SpanAttributes.DB_SYSTEM: \"elasticsearch\",\n }\n if url:\n attributes[\"elasticsearch.url\"] = url\n if method:\n attributes[\"elasticsearch.method\"] = method\n if body:\n attributes[SpanAttributes.DB_STATEMENT] = str(body)\n if params:\n attributes[\"elasticsearch.params\"] = str(params)\n if doc_id:\n attributes[\"elasticsearch.id\"] = doc_id\n for key, value in attributes.items():\n span.set_attribute(key, value)\n\n rv = wrapped(*args, **kwargs)\n if isinstance(rv, dict) and span.is_recording():\n for member in _ATTRIBUTES_FROM_RESULT:\n if member in rv:\n span.set_attribute(\n f\"elasticsearch.{member}\",\n str(rv[member]),\n )\n\n if callable(response_hook):\n response_hook(span, rv)\n return rv\n\n return wrapper\n", "path": "instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py"}]} | 3,086 | 548 |
gh_patches_debug_11304 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1450 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add example code for overlay segment configuration for workstation
</issue>
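
The patch at the end of this entry answers the request by documenting an inline example in the form's help text. Expanded into a standalone snippet, an `overlay_segments` value (edited through the JSON widget shown in the code below) could look roughly like this — the voxel values, segment names, and metric path are illustrative assumptions only:

```json
[
  {
    "voxel_value": 0,
    "name": "Level 0",
    "visible": false,
    "metric_template": "{{metrics.volumes[0]}} mm³"
  },
  {
    "voxel_value": 1,
    "name": "Level 1",
    "visible": true,
    "metric_template": "{{metrics.volumes[1]}} mm³"
  }
]
```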
<code>
[start of app/grandchallenge/workstation_configs/forms.py]
1 from django.forms import ModelForm
2
3 from grandchallenge.core.forms import SaveFormInitMixin
4 from grandchallenge.core.widgets import JSONEditorWidget
5 from grandchallenge.workstation_configs.models import (
6 OVERLAY_SEGMENTS_SCHEMA,
7 WorkstationConfig,
8 )
9
10
11 class WorkstationConfigForm(SaveFormInitMixin, ModelForm):
12 class Meta:
13 model = WorkstationConfig
14 fields = (
15 "title",
16 "description",
17 "window_presets",
18 "default_window_preset",
19 "default_slab_thickness_mm",
20 "default_slab_render_method",
21 "default_orientation",
22 "default_overlay_alpha",
23 "default_overlay_lut",
24 "default_overlay_interpolation",
25 "overlay_segments",
26 "default_zoom_scale",
27 "show_image_info_plugin",
28 "show_display_plugin",
29 "show_invert_tool",
30 "show_flip_tool",
31 "show_window_level_tool",
32 "show_reset_tool",
33 )
34 widgets = {
35 "overlay_segments": JSONEditorWidget(
36 schema=OVERLAY_SEGMENTS_SCHEMA
37 ),
38 }
39
[end of app/grandchallenge/workstation_configs/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

diff --git a/app/grandchallenge/workstation_configs/forms.py b/app/grandchallenge/workstation_configs/forms.py
--- a/app/grandchallenge/workstation_configs/forms.py
+++ b/app/grandchallenge/workstation_configs/forms.py
@@ -36,3 +36,14 @@
schema=OVERLAY_SEGMENTS_SCHEMA
),
}
+ help_texts = {
+ "overlay_segments": (
+ "If an categorical overlay is shown, it is possible to show toggles "
+ "to change the visibility of the different overlay categories. To do "
+ "so, configure the categories that should be displayed. Data from the"
+ " algorithm's output.json can be added as an extra label to each "
+ "toggle using jinja templating. "
+ 'For example: [{ "voxel_value": 0, "name": "Level 0", "visible": '
+ 'false, "metric_template": "{{metrics.volumes[0]}} mm³"},]'
+ ),
+ }
| {"golden_diff": "diff --git a/app/grandchallenge/workstation_configs/forms.py b/app/grandchallenge/workstation_configs/forms.py\n--- a/app/grandchallenge/workstation_configs/forms.py\n+++ b/app/grandchallenge/workstation_configs/forms.py\n@@ -36,3 +36,14 @@\n schema=OVERLAY_SEGMENTS_SCHEMA\n ),\n }\n+ help_texts = {\n+ \"overlay_segments\": (\n+ \"If an categorical overlay is shown, it is possible to show toggles \"\n+ \"to change the visibility of the different overlay categories. To do \"\n+ \"so, configure the categories that should be displayed. Data from the\"\n+ \" algorithm's output.json can be added as an extra label to each \"\n+ \"toggle using jinja templating. \"\n+ 'For example: [{ \"voxel_value\": 0, \"name\": \"Level 0\", \"visible\": '\n+ 'false, \"metric_template\": \"{{metrics.volumes[0]}} mm\u00b3\"},]'\n+ ),\n+ }\n", "issue": "Add example code for overlay segment configuration for workstation\n\n", "before_files": [{"content": "from django.forms import ModelForm\n\nfrom grandchallenge.core.forms import SaveFormInitMixin\nfrom grandchallenge.core.widgets import JSONEditorWidget\nfrom grandchallenge.workstation_configs.models import (\n OVERLAY_SEGMENTS_SCHEMA,\n WorkstationConfig,\n)\n\n\nclass WorkstationConfigForm(SaveFormInitMixin, ModelForm):\n class Meta:\n model = WorkstationConfig\n fields = (\n \"title\",\n \"description\",\n \"window_presets\",\n \"default_window_preset\",\n \"default_slab_thickness_mm\",\n \"default_slab_render_method\",\n \"default_orientation\",\n \"default_overlay_alpha\",\n \"default_overlay_lut\",\n \"default_overlay_interpolation\",\n \"overlay_segments\",\n \"default_zoom_scale\",\n \"show_image_info_plugin\",\n \"show_display_plugin\",\n \"show_invert_tool\",\n \"show_flip_tool\",\n \"show_window_level_tool\",\n \"show_reset_tool\",\n )\n widgets = {\n \"overlay_segments\": JSONEditorWidget(\n schema=OVERLAY_SEGMENTS_SCHEMA\n ),\n }\n", "path": "app/grandchallenge/workstation_configs/forms.py"}]} | 844 | 220 |
gh_patches_debug_15232 | rasdani/github-patches | git_diff | pre-commit__pre-commit-914 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
C:\python3\python.exe does not exist while running pre-commit on Windows
Hi,
I'm trying to run the pre-commit commands of the [ESSS/barril](https://github.com/ESSS/barril) repository on my Windows machine, but I'm getting this error:
```
λ tox -e linting
linting installed: aspy.yaml==1.1.1,cfgv==1.4.0,identify==1.1.8,importlib-metadata==0.8,importlib-resources==1.0.2,nodeenv==1.3.3,pre-commit==1.14.0,PyYAML==3.13,six==1.12.0,toml==0.10.0,virtualenv==16.2.0,zipp==0.3.3
linting run-test-pre: PYTHONHASHSEED='554'
linting runtests: commands[0] | pre-commit run --all-files --show-diff-on-failure
[INFO] Installing environment for https://github.com/ambv/black.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
An unexpected error has occurred: CalledProcessError: Command: ('w:\\projects\\barril\\.tox\\linting\\scripts\\python.exe', '-mvirtualenv', 'C:\\Users\\darci\\.cache\\pre-commit\\repoufam_s8_\\py_env-python3', '-p', 'C:\\python3\\python.exe')
Return code: 3
Expected return code: 0
Output:
The path C:\python3\python.exe (from --python=C:\python3\python.exe) does not exist
Errors: (none)
Check the log at C:\Users\darci/.cache\pre-commit\pre-commit.log
```
The `C:\python3` path does not exist on my machine; I'm using `conda` and `virtualenv` to create a Python 3.6 virtual environment, from which I execute `tox -e linting`.
Here's the contents of the `pre-commit-config.yaml`:
```yaml
repos:
- repo: https://github.com/ambv/black
rev: 18.6b4
hooks:
- id: black
args: [--safe, --quiet]
language_version: python3
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v1.3.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: debug-statements
- id: flake8
- repo: local
hooks:
- id: rst
name: rst
entry: rst-lint --encoding utf-8
files: ^(CHANGELOG.rst|HOWTORELEASE.rst|README.rst)$
language: python
additional_dependencies: [pygments, restructuredtext_lint]
```
Any hints of what the problem might be?
cc @nicoddemus
</issue>
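
The diff at the end of this entry resolves this by first checking whether the interpreter already running pre-commit satisfies the requested `language_version`, and only falling back to the `C:\pythonXX\python.exe` guess afterwards. A condensed sketch of that check (it mirrors `_sys_executable_matches` from the patch below; the version strings in the comments are arbitrary examples):

```python
# Sketch of the version-matching idea used by the fix below.
import sys


def sys_executable_matches(version):
    """True when the running interpreter satisfies 'python', 'python3', 'python3.6', ..."""
    if version == 'python':
        return True
    if not version.startswith('python'):
        return False
    try:
        info = tuple(int(p) for p in version[len('python'):].split('.'))
    except ValueError:
        return False
    return sys.version_info[:len(info)] == info


# In the reporter's Python 3.6 virtualenv this lets `language_version: python3`
# resolve to sys.executable instead of the non-existent C:\python3\python.exe.
print(sys_executable_matches('python3'))
```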
<code>
[start of pre_commit/languages/python.py]
1 from __future__ import unicode_literals
2
3 import contextlib
4 import os
5 import sys
6
7 import pre_commit.constants as C
8 from pre_commit.envcontext import envcontext
9 from pre_commit.envcontext import UNSET
10 from pre_commit.envcontext import Var
11 from pre_commit.languages import helpers
12 from pre_commit.parse_shebang import find_executable
13 from pre_commit.util import CalledProcessError
14 from pre_commit.util import clean_path_on_failure
15 from pre_commit.util import cmd_output
16
17
18 ENVIRONMENT_DIR = 'py_env'
19
20
21 def bin_dir(venv):
22 """On windows there's a different directory for the virtualenv"""
23 bin_part = 'Scripts' if os.name == 'nt' else 'bin'
24 return os.path.join(venv, bin_part)
25
26
27 def get_env_patch(venv):
28 return (
29 ('PYTHONHOME', UNSET),
30 ('VIRTUAL_ENV', venv),
31 ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),
32 )
33
34
35 def _find_by_py_launcher(version): # pragma: no cover (windows only)
36 if version.startswith('python'):
37 try:
38 return cmd_output(
39 'py', '-{}'.format(version[len('python'):]),
40 '-c', 'import sys; print(sys.executable)',
41 )[1].strip()
42 except CalledProcessError:
43 pass
44
45
46 def _get_default_version(): # pragma: no cover (platform dependent)
47 def _norm(path):
48 _, exe = os.path.split(path.lower())
49 exe, _, _ = exe.partition('.exe')
50 if find_executable(exe) and exe not in {'python', 'pythonw'}:
51 return exe
52
53 # First attempt from `sys.executable` (or the realpath)
54 # On linux, I see these common sys.executables:
55 #
56 # system `python`: /usr/bin/python -> python2.7
57 # system `python2`: /usr/bin/python2 -> python2.7
58 # virtualenv v: v/bin/python (will not return from this loop)
59 # virtualenv v -ppython2: v/bin/python -> python2
60 # virtualenv v -ppython2.7: v/bin/python -> python2.7
61 # virtualenv v -ppypy: v/bin/python -> v/bin/pypy
62 for path in {sys.executable, os.path.realpath(sys.executable)}:
63 exe = _norm(path)
64 if exe:
65 return exe
66
67 # Next try the `pythonX.X` executable
68 exe = 'python{}.{}'.format(*sys.version_info)
69 if find_executable(exe):
70 return exe
71
72 if _find_by_py_launcher(exe):
73 return exe
74
75 # Give a best-effort try for windows
76 if os.path.exists(r'C:\{}\python.exe'.format(exe.replace('.', ''))):
77 return exe
78
79 # We tried!
80 return C.DEFAULT
81
82
83 def get_default_version():
84 # TODO: when dropping python2, use `functools.lru_cache(maxsize=1)`
85 try:
86 return get_default_version.cached_version
87 except AttributeError:
88 get_default_version.cached_version = _get_default_version()
89 return get_default_version()
90
91
92 def norm_version(version):
93 if os.name == 'nt': # pragma: no cover (windows)
94 # Try looking up by name
95 version_exec = find_executable(version)
96 if version_exec and version_exec != version:
97 return version_exec
98
99 version_exec = _find_by_py_launcher(version)
100 if version_exec:
101 return version_exec
102
103 # If it is in the form pythonx.x search in the default
104 # place on windows
105 if version.startswith('python'):
106 return r'C:\{}\python.exe'.format(version.replace('.', ''))
107
108 # Otherwise assume it is a path
109 return os.path.expanduser(version)
110
111
112 def py_interface(_dir, _make_venv):
113 @contextlib.contextmanager
114 def in_env(prefix, language_version):
115 envdir = prefix.path(helpers.environment_dir(_dir, language_version))
116 with envcontext(get_env_patch(envdir)):
117 yield
118
119 def healthy(prefix, language_version):
120 with in_env(prefix, language_version):
121 retcode, _, _ = cmd_output(
122 'python', '-c',
123 'import ctypes, datetime, io, os, ssl, weakref',
124 retcode=None,
125 )
126 return retcode == 0
127
128 def run_hook(hook, file_args):
129 with in_env(hook.prefix, hook.language_version):
130 return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args)
131
132 def install_environment(prefix, version, additional_dependencies):
133 additional_dependencies = tuple(additional_dependencies)
134 directory = helpers.environment_dir(_dir, version)
135
136 env_dir = prefix.path(directory)
137 with clean_path_on_failure(env_dir):
138 if version != C.DEFAULT:
139 python = norm_version(version)
140 else:
141 python = os.path.realpath(sys.executable)
142 _make_venv(env_dir, python)
143 with in_env(prefix, version):
144 helpers.run_setup_cmd(
145 prefix, ('pip', 'install', '.') + additional_dependencies,
146 )
147
148 return in_env, healthy, run_hook, install_environment
149
150
151 def make_venv(envdir, python):
152 env = dict(os.environ, VIRTUALENV_NO_DOWNLOAD='1')
153 cmd = (sys.executable, '-mvirtualenv', envdir, '-p', python)
154 cmd_output(*cmd, env=env, cwd='/')
155
156
157 _interface = py_interface(ENVIRONMENT_DIR, make_venv)
158 in_env, healthy, run_hook, install_environment = _interface
159
[end of pre_commit/languages/python.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py
--- a/pre_commit/languages/python.py
+++ b/pre_commit/languages/python.py
@@ -89,8 +89,26 @@
return get_default_version()
+def _sys_executable_matches(version):
+ if version == 'python':
+ return True
+ elif not version.startswith('python'):
+ return False
+
+ try:
+ info = tuple(int(p) for p in version[len('python'):].split('.'))
+ except ValueError:
+ return False
+
+ return sys.version_info[:len(info)] == info
+
+
def norm_version(version):
if os.name == 'nt': # pragma: no cover (windows)
+ # first see if our current executable is appropriate
+ if _sys_executable_matches(version):
+ return sys.executable
+
# Try looking up by name
version_exec = find_executable(version)
if version_exec and version_exec != version:
| {"golden_diff": "diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py\n--- a/pre_commit/languages/python.py\n+++ b/pre_commit/languages/python.py\n@@ -89,8 +89,26 @@\n return get_default_version()\n \n \n+def _sys_executable_matches(version):\n+ if version == 'python':\n+ return True\n+ elif not version.startswith('python'):\n+ return False\n+\n+ try:\n+ info = tuple(int(p) for p in version[len('python'):].split('.'))\n+ except ValueError:\n+ return False\n+\n+ return sys.version_info[:len(info)] == info\n+\n+\n def norm_version(version):\n if os.name == 'nt': # pragma: no cover (windows)\n+ # first see if our current executable is appropriate\n+ if _sys_executable_matches(version):\n+ return sys.executable\n+\n # Try looking up by name\n version_exec = find_executable(version)\n if version_exec and version_exec != version:\n", "issue": "C:\\python3\\python.exe does not exist while running pre-commit on Windows\nHi,\r\n\r\nI'm trying to run the pre-commit commands of the [ESSS/barril](https://github.com/ESSS/barril) repository on my Windows machine, but I'm getting this error:\r\n\r\n```\r\n \u03bb tox -e linting\r\nlinting installed: aspy.yaml==1.1.1,cfgv==1.4.0,identify==1.1.8,importlib-metadata==0.8,importlib-resources==1.0.2,nodeenv==1.3.3,pre-commit==1.14.0,PyYAML==3.13,six==1.12.0,toml==0.10.0,virtualenv==16.2.0,zipp==0.3.3\r\nlinting run-test-pre: PYTHONHASHSEED='554'\r\nlinting runtests: commands[0] | pre-commit run --all-files --show-diff-on-failure\r\n[INFO] Installing environment for https://github.com/ambv/black.\r\n[INFO] Once installed this environment will be reused.\r\n[INFO] This may take a few minutes...\r\nAn unexpected error has occurred: CalledProcessError: Command: ('w:\\\\projects\\\\barril\\\\.tox\\\\linting\\\\scripts\\\\python.exe', '-mvirtualenv', 'C:\\\\Users\\\\darci\\\\.cache\\\\pre-commit\\\\repoufam_s8_\\\\py_env-python3', '-p', 'C:\\\\python3\\\\python.exe')\r\nReturn code: 3\r\nExpected return code: 0\r\nOutput:\r\n The path C:\\python3\\python.exe (from --python=C:\\python3\\python.exe) does not exist\r\n\r\nErrors: (none)\r\n\r\nCheck the log at C:\\Users\\darci/.cache\\pre-commit\\pre-commit.log\r\n```\r\n\r\nThe `C:\\python3` path does not exist on my machine, I'm using `conda` and `virtualenv` to create a Python 3.6 virtual environment, from which I execute `tox -e linting`.\r\n\r\nHere's the contents of the `pre-commit-config.yaml`:\r\n\r\n```yaml\r\nrepos:\r\n- repo: https://github.com/ambv/black\r\n rev: 18.6b4\r\n hooks:\r\n - id: black\r\n args: [--safe, --quiet]\r\n language_version: python3\r\n- repo: https://github.com/pre-commit/pre-commit-hooks\r\n rev: v1.3.0\r\n hooks:\r\n - id: trailing-whitespace\r\n - id: end-of-file-fixer\r\n - id: check-yaml\r\n - id: debug-statements\r\n - id: flake8\r\n- repo: local\r\n hooks:\r\n - id: rst\r\n name: rst\r\n entry: rst-lint --encoding utf-8\r\n files: ^(CHANGELOG.rst|HOWTORELEASE.rst|README.rst)$\r\n language: python\r\n additional_dependencies: [pygments, restructuredtext_lint]\r\n```\r\n\r\nAny hints of what the problem might be?\r\n\r\ncc @nicoddemus \n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport os\nimport sys\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.parse_shebang import find_executable\nfrom pre_commit.util import 
CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\n\n\nENVIRONMENT_DIR = 'py_env'\n\n\ndef bin_dir(venv):\n \"\"\"On windows there's a different directory for the virtualenv\"\"\"\n bin_part = 'Scripts' if os.name == 'nt' else 'bin'\n return os.path.join(venv, bin_part)\n\n\ndef get_env_patch(venv):\n return (\n ('PYTHONHOME', UNSET),\n ('VIRTUAL_ENV', venv),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\ndef _find_by_py_launcher(version): # pragma: no cover (windows only)\n if version.startswith('python'):\n try:\n return cmd_output(\n 'py', '-{}'.format(version[len('python'):]),\n '-c', 'import sys; print(sys.executable)',\n )[1].strip()\n except CalledProcessError:\n pass\n\n\ndef _get_default_version(): # pragma: no cover (platform dependent)\n def _norm(path):\n _, exe = os.path.split(path.lower())\n exe, _, _ = exe.partition('.exe')\n if find_executable(exe) and exe not in {'python', 'pythonw'}:\n return exe\n\n # First attempt from `sys.executable` (or the realpath)\n # On linux, I see these common sys.executables:\n #\n # system `python`: /usr/bin/python -> python2.7\n # system `python2`: /usr/bin/python2 -> python2.7\n # virtualenv v: v/bin/python (will not return from this loop)\n # virtualenv v -ppython2: v/bin/python -> python2\n # virtualenv v -ppython2.7: v/bin/python -> python2.7\n # virtualenv v -ppypy: v/bin/python -> v/bin/pypy\n for path in {sys.executable, os.path.realpath(sys.executable)}:\n exe = _norm(path)\n if exe:\n return exe\n\n # Next try the `pythonX.X` executable\n exe = 'python{}.{}'.format(*sys.version_info)\n if find_executable(exe):\n return exe\n\n if _find_by_py_launcher(exe):\n return exe\n\n # Give a best-effort try for windows\n if os.path.exists(r'C:\\{}\\python.exe'.format(exe.replace('.', ''))):\n return exe\n\n # We tried!\n return C.DEFAULT\n\n\ndef get_default_version():\n # TODO: when dropping python2, use `functools.lru_cache(maxsize=1)`\n try:\n return get_default_version.cached_version\n except AttributeError:\n get_default_version.cached_version = _get_default_version()\n return get_default_version()\n\n\ndef norm_version(version):\n if os.name == 'nt': # pragma: no cover (windows)\n # Try looking up by name\n version_exec = find_executable(version)\n if version_exec and version_exec != version:\n return version_exec\n\n version_exec = _find_by_py_launcher(version)\n if version_exec:\n return version_exec\n\n # If it is in the form pythonx.x search in the default\n # place on windows\n if version.startswith('python'):\n return r'C:\\{}\\python.exe'.format(version.replace('.', ''))\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\ndef py_interface(_dir, _make_venv):\n @contextlib.contextmanager\n def in_env(prefix, language_version):\n envdir = prefix.path(helpers.environment_dir(_dir, language_version))\n with envcontext(get_env_patch(envdir)):\n yield\n\n def healthy(prefix, language_version):\n with in_env(prefix, language_version):\n retcode, _, _ = cmd_output(\n 'python', '-c',\n 'import ctypes, datetime, io, os, ssl, weakref',\n retcode=None,\n )\n return retcode == 0\n\n def run_hook(hook, file_args):\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args)\n\n def install_environment(prefix, version, additional_dependencies):\n additional_dependencies = tuple(additional_dependencies)\n directory = helpers.environment_dir(_dir, version)\n\n env_dir = 
prefix.path(directory)\n with clean_path_on_failure(env_dir):\n if version != C.DEFAULT:\n python = norm_version(version)\n else:\n python = os.path.realpath(sys.executable)\n _make_venv(env_dir, python)\n with in_env(prefix, version):\n helpers.run_setup_cmd(\n prefix, ('pip', 'install', '.') + additional_dependencies,\n )\n\n return in_env, healthy, run_hook, install_environment\n\n\ndef make_venv(envdir, python):\n env = dict(os.environ, VIRTUALENV_NO_DOWNLOAD='1')\n cmd = (sys.executable, '-mvirtualenv', envdir, '-p', python)\n cmd_output(*cmd, env=env, cwd='/')\n\n\n_interface = py_interface(ENVIRONMENT_DIR, make_venv)\nin_env, healthy, run_hook, install_environment = _interface\n", "path": "pre_commit/languages/python.py"}]} | 2,815 | 222 |
gh_patches_debug_6829 | rasdani/github-patches | git_diff | zulip__zulip-23329 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Rename Recent topics to Recent conversations
Once #19449 has been resolved, we should rename Recent topics to Recent conversations across the board. This includes the left sidebar, documentation, settings menus, marketing pages, and anywhere else this term may be used.
</issue>
<code>
[start of zerver/lib/onboarding.py]
1 from typing import Dict, List
2
3 from django.conf import settings
4 from django.db import transaction
5 from django.db.models import Count
6 from django.utils.translation import gettext as _
7 from django.utils.translation import override as override_language
8
9 from zerver.actions.create_realm import setup_realm_internal_bots
10 from zerver.actions.message_send import (
11 do_send_messages,
12 internal_prep_stream_message_by_name,
13 internal_send_private_message,
14 )
15 from zerver.actions.reactions import do_add_reaction
16 from zerver.lib.emoji import emoji_name_to_emoji_code
17 from zerver.lib.message import SendMessageRequest
18 from zerver.models import Message, Realm, UserProfile, get_system_bot
19
20
21 def missing_any_realm_internal_bots() -> bool:
22 bot_emails = [
23 bot["email_template"] % (settings.INTERNAL_BOT_DOMAIN,)
24 for bot in settings.REALM_INTERNAL_BOTS
25 ]
26 bot_counts = {
27 email: count
28 for email, count in UserProfile.objects.filter(email__in=bot_emails)
29 .values_list("email")
30 .annotate(Count("id"))
31 }
32 realm_count = Realm.objects.count()
33 return any(bot_counts.get(email, 0) < realm_count for email in bot_emails)
34
35
36 def create_if_missing_realm_internal_bots() -> None:
37 """This checks if there is any realm internal bot missing.
38
39 If that is the case, it creates the missing realm internal bots.
40 """
41 if missing_any_realm_internal_bots():
42 for realm in Realm.objects.all():
43 setup_realm_internal_bots(realm)
44
45
46 def send_initial_pms(user: UserProfile) -> None:
47 organization_setup_text = ""
48
49 # We need to override the language in this code path, because it's
50 # called from account registration, which is a pre-account API
51 # request and thus may not have the user's language context yet.
52 with override_language(user.default_language):
53 if user.is_realm_admin:
54 help_url = user.realm.uri + "/help/getting-your-organization-started-with-zulip"
55 organization_setup_text = (
56 " " + _("We also have a guide for [Setting up your organization]({help_url}).")
57 ).format(help_url=help_url)
58
59 welcome_msg = _("Hello, and welcome to Zulip!") + "👋"
60 demo_org_warning = ""
61 if user.realm.demo_organization_scheduled_deletion_date is not None:
62 demo_org_warning = (
63 _(
64 "Note that this is a [demo organization]({demo_org_help_url}) and will be "
65 "**automatically deleted** in 30 days."
66 )
67 + "\n\n"
68 )
69
70 content = "".join(
71 [
72 welcome_msg + " ",
73 _("This is a private message from me, Welcome Bot.") + "\n\n",
74 _(
75 "If you are new to Zulip, check out our [Getting started guide]({getting_started_url})!"
76 ),
77 "{organization_setup_text}" + "\n\n",
78 "{demo_org_warning}",
79 _(
80 "I can also help you get set up! Just click anywhere on this message or press `r` to reply."
81 )
82 + "\n\n",
83 _("Here are a few messages I understand:") + " ",
84 bot_commands(),
85 ]
86 )
87
88 content = content.format(
89 organization_setup_text=organization_setup_text,
90 demo_org_warning=demo_org_warning,
91 demo_org_help_url="/help/demo-organizations",
92 getting_started_url="/help/getting-started-with-zulip",
93 )
94
95 internal_send_private_message(
96 get_system_bot(settings.WELCOME_BOT, user.realm_id), user, content
97 )
98
99
100 def bot_commands(no_help_command: bool = False) -> str:
101 commands = [
102 "apps",
103 "profile",
104 "theme",
105 "streams",
106 "topics",
107 "message formatting",
108 "keyboard shortcuts",
109 ]
110 if not no_help_command:
111 commands.append("help")
112 return ", ".join(["`" + command + "`" for command in commands]) + "."
113
114
115 def select_welcome_bot_response(human_response_lower: str) -> str:
116 # Given the raw (pre-markdown-rendering) content for a private
117 # message from the user to Welcome Bot, select the appropriate reply.
118 if human_response_lower in ["app", "apps"]:
119 return _(
120 "You can [download](/apps) the [mobile and desktop apps](/apps). "
121 "Zulip also works great in a browser."
122 )
123 elif human_response_lower == "profile":
124 return _(
125 "Go to [Profile settings](#settings/profile) "
126 "to add a [profile picture](/help/change-your-profile-picture) "
127 "and edit your [profile information](/help/edit-your-profile)."
128 )
129 elif human_response_lower == "theme":
130 return _(
131 "Go to [Display settings](#settings/display-settings) "
132 "to [switch between the light and dark themes](/help/dark-theme), "
133 "[pick your favorite emoji theme](/help/emoji-and-emoticons#change-your-emoji-set), "
134 "[change your language](/help/change-your-language), "
135 "and make other tweaks to your Zulip experience."
136 )
137 elif human_response_lower in ["stream", "streams", "channel", "channels"]:
138 return "".join(
139 [
140 _(
141 "In Zulip, streams [determine who gets a message](/help/streams-and-topics). "
142 "They are similar to channels in other chat apps."
143 )
144 + "\n\n",
145 _("[Browse and subscribe to streams](#streams/all)."),
146 ]
147 )
148 elif human_response_lower in ["topic", "topics"]:
149 return "".join(
150 [
151 _(
152 "In Zulip, topics [tell you what a message is about](/help/streams-and-topics). "
153 "They are light-weight subjects, very similar to the subject line of an email."
154 )
155 + "\n\n",
156 _(
157 "Check out [Recent conversations](#recent_topics) to see what's happening! "
158 'You can return to this conversation by clicking "Private messages" in the upper left.'
159 ),
160 ]
161 )
162 elif human_response_lower in ["keyboard", "shortcuts", "keyboard shortcuts"]:
163 return "".join(
164 [
165 _(
166 "Zulip's [keyboard shortcuts](#keyboard-shortcuts) "
167 "let you navigate the app quickly and efficiently."
168 )
169 + "\n\n",
170 _("Press `?` any time to see a [cheat sheet](#keyboard-shortcuts)."),
171 ]
172 )
173 elif human_response_lower in ["formatting", "message formatting"]:
174 return "".join(
175 [
176 _(
177 "Zulip uses [Markdown](/help/format-your-message-using-markdown), "
178 "an intuitive format for **bold**, *italics*, bulleted lists, and more. "
179 "Click [here](#message-formatting) for a cheat sheet."
180 )
181 + "\n\n",
182 _(
183 "Check out our [messaging tips](/help/messaging-tips) "
184 "to learn about emoji reactions, code blocks and much more!"
185 ),
186 ]
187 )
188 elif human_response_lower in ["help", "?"]:
189 return "".join(
190 [
191 _("Here are a few messages I understand:") + " ",
192 bot_commands(no_help_command=True) + "\n\n",
193 _(
194 "Check out our [Getting started guide](/help/getting-started-with-zulip), "
195 "or browse the [Help center](/help/) to learn more!"
196 ),
197 ]
198 )
199 else:
200 return "".join(
201 [
202 _(
203 "I’m sorry, I did not understand your message. Please try one of the following commands:"
204 )
205 + " ",
206 bot_commands(),
207 ]
208 )
209
210
211 def send_welcome_bot_response(send_request: SendMessageRequest) -> None:
212 """Given the send_request object for a private message from the user
213 to welcome-bot, trigger the welcome-bot reply."""
214 welcome_bot = get_system_bot(settings.WELCOME_BOT, send_request.message.sender.realm_id)
215 human_response_lower = send_request.message.content.lower()
216 content = select_welcome_bot_response(human_response_lower)
217 internal_send_private_message(welcome_bot, send_request.message.sender, content)
218
219
220 @transaction.atomic
221 def send_initial_realm_messages(realm: Realm) -> None:
222 welcome_bot = get_system_bot(settings.WELCOME_BOT, realm.id)
223 # Make sure each stream created in the realm creation process has at least one message below
224 # Order corresponds to the ordering of the streams on the left sidebar, to make the initial Home
225 # view slightly less overwhelming
226 content_of_private_streams_topic = (
227 _("This is a private stream, as indicated by the lock icon next to the stream name.")
228 + " "
229 + _("Private streams are only visible to stream members.")
230 + "\n"
231 "\n"
232 + _(
233 "To manage this stream, go to [Stream settings]({stream_settings_url}) "
234 "and click on `{initial_private_stream_name}`."
235 )
236 ).format(
237 stream_settings_url="#streams/subscribed",
238 initial_private_stream_name=Realm.INITIAL_PRIVATE_STREAM_NAME,
239 )
240
241 content1_of_topic_demonstration_topic = (
242 _(
243 "This is a message on stream #**{default_notification_stream_name}** with the "
244 "topic `topic demonstration`."
245 )
246 ).format(default_notification_stream_name=Realm.DEFAULT_NOTIFICATION_STREAM_NAME)
247
248 content2_of_topic_demonstration_topic = (
249 _("Topics are a lightweight tool to keep conversations organized.")
250 + " "
251 + _("You can learn more about topics at [Streams and topics]({about_topics_help_url}).")
252 ).format(about_topics_help_url="/help/streams-and-topics")
253
254 content_of_swimming_turtles_topic = (
255 _(
256 "This is a message on stream #**{default_notification_stream_name}** with the "
257 "topic `swimming turtles`."
258 )
259 + "\n"
260 "\n"
261 "[](/static/images/cute/turtle.png)"
262 "\n"
263 "\n"
264 + _(
265 "[Start a new topic]({start_topic_help_url}) any time you're not replying to a \
266 previous message."
267 )
268 ).format(
269 default_notification_stream_name=Realm.DEFAULT_NOTIFICATION_STREAM_NAME,
270 start_topic_help_url="/help/start-a-new-topic",
271 )
272
273 welcome_messages: List[Dict[str, str]] = [
274 {
275 "stream": Realm.INITIAL_PRIVATE_STREAM_NAME,
276 "topic": "private streams",
277 "content": content_of_private_streams_topic,
278 },
279 {
280 "stream": Realm.DEFAULT_NOTIFICATION_STREAM_NAME,
281 "topic": "topic demonstration",
282 "content": content1_of_topic_demonstration_topic,
283 },
284 {
285 "stream": Realm.DEFAULT_NOTIFICATION_STREAM_NAME,
286 "topic": "topic demonstration",
287 "content": content2_of_topic_demonstration_topic,
288 },
289 {
290 "stream": realm.DEFAULT_NOTIFICATION_STREAM_NAME,
291 "topic": "swimming turtles",
292 "content": content_of_swimming_turtles_topic,
293 },
294 ]
295
296 messages = [
297 internal_prep_stream_message_by_name(
298 realm,
299 welcome_bot,
300 message["stream"],
301 message["topic"],
302 message["content"],
303 )
304 for message in welcome_messages
305 ]
306 message_ids = do_send_messages(messages)
307
308 # We find the one of our just-sent messages with turtle.png in it,
309 # and react to it. This is a bit hacky, but works and is kinda a
310 # 1-off thing.
311 turtle_message = Message.objects.select_for_update().get(
312 id__in=message_ids, content__icontains="cute/turtle.png"
313 )
314 (emoji_code, reaction_type) = emoji_name_to_emoji_code(realm, "turtle")
315 do_add_reaction(welcome_bot, turtle_message, "turtle", emoji_code, reaction_type)
316
[end of zerver/lib/onboarding.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zerver/lib/onboarding.py b/zerver/lib/onboarding.py
--- a/zerver/lib/onboarding.py
+++ b/zerver/lib/onboarding.py
@@ -154,7 +154,7 @@
)
+ "\n\n",
_(
- "Check out [Recent conversations](#recent_topics) to see what's happening! "
+ "Check out [Recent conversations](#recent) to see what's happening! "
'You can return to this conversation by clicking "Private messages" in the upper left.'
),
]
| {"golden_diff": "diff --git a/zerver/lib/onboarding.py b/zerver/lib/onboarding.py\n--- a/zerver/lib/onboarding.py\n+++ b/zerver/lib/onboarding.py\n@@ -154,7 +154,7 @@\n )\n + \"\\n\\n\",\n _(\n- \"Check out [Recent conversations](#recent_topics) to see what's happening! \"\n+ \"Check out [Recent conversations](#recent) to see what's happening! \"\n 'You can return to this conversation by clicking \"Private messages\" in the upper left.'\n ),\n ]\n", "issue": "Rename Recent topics to Recent conversations\nOnce #19449 has been resolved, we should rename Recent topics to Recent conversations across the board. This includes the left sidebar, documentation, settings menus, marketing pages, and anywhere else this term may be used.\n", "before_files": [{"content": "from typing import Dict, List\n\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django.db.models import Count\nfrom django.utils.translation import gettext as _\nfrom django.utils.translation import override as override_language\n\nfrom zerver.actions.create_realm import setup_realm_internal_bots\nfrom zerver.actions.message_send import (\n do_send_messages,\n internal_prep_stream_message_by_name,\n internal_send_private_message,\n)\nfrom zerver.actions.reactions import do_add_reaction\nfrom zerver.lib.emoji import emoji_name_to_emoji_code\nfrom zerver.lib.message import SendMessageRequest\nfrom zerver.models import Message, Realm, UserProfile, get_system_bot\n\n\ndef missing_any_realm_internal_bots() -> bool:\n bot_emails = [\n bot[\"email_template\"] % (settings.INTERNAL_BOT_DOMAIN,)\n for bot in settings.REALM_INTERNAL_BOTS\n ]\n bot_counts = {\n email: count\n for email, count in UserProfile.objects.filter(email__in=bot_emails)\n .values_list(\"email\")\n .annotate(Count(\"id\"))\n }\n realm_count = Realm.objects.count()\n return any(bot_counts.get(email, 0) < realm_count for email in bot_emails)\n\n\ndef create_if_missing_realm_internal_bots() -> None:\n \"\"\"This checks if there is any realm internal bot missing.\n\n If that is the case, it creates the missing realm internal bots.\n \"\"\"\n if missing_any_realm_internal_bots():\n for realm in Realm.objects.all():\n setup_realm_internal_bots(realm)\n\n\ndef send_initial_pms(user: UserProfile) -> None:\n organization_setup_text = \"\"\n\n # We need to override the language in this code path, because it's\n # called from account registration, which is a pre-account API\n # request and thus may not have the user's language context yet.\n with override_language(user.default_language):\n if user.is_realm_admin:\n help_url = user.realm.uri + \"/help/getting-your-organization-started-with-zulip\"\n organization_setup_text = (\n \" \" + _(\"We also have a guide for [Setting up your organization]({help_url}).\")\n ).format(help_url=help_url)\n\n welcome_msg = _(\"Hello, and welcome to Zulip!\") + \"\ud83d\udc4b\"\n demo_org_warning = \"\"\n if user.realm.demo_organization_scheduled_deletion_date is not None:\n demo_org_warning = (\n _(\n \"Note that this is a [demo organization]({demo_org_help_url}) and will be \"\n \"**automatically deleted** in 30 days.\"\n )\n + \"\\n\\n\"\n )\n\n content = \"\".join(\n [\n welcome_msg + \" \",\n _(\"This is a private message from me, Welcome Bot.\") + \"\\n\\n\",\n _(\n \"If you are new to Zulip, check out our [Getting started guide]({getting_started_url})!\"\n ),\n \"{organization_setup_text}\" + \"\\n\\n\",\n \"{demo_org_warning}\",\n _(\n \"I can also help you get set up! 
Just click anywhere on this message or press `r` to reply.\"\n )\n + \"\\n\\n\",\n _(\"Here are a few messages I understand:\") + \" \",\n bot_commands(),\n ]\n )\n\n content = content.format(\n organization_setup_text=organization_setup_text,\n demo_org_warning=demo_org_warning,\n demo_org_help_url=\"/help/demo-organizations\",\n getting_started_url=\"/help/getting-started-with-zulip\",\n )\n\n internal_send_private_message(\n get_system_bot(settings.WELCOME_BOT, user.realm_id), user, content\n )\n\n\ndef bot_commands(no_help_command: bool = False) -> str:\n commands = [\n \"apps\",\n \"profile\",\n \"theme\",\n \"streams\",\n \"topics\",\n \"message formatting\",\n \"keyboard shortcuts\",\n ]\n if not no_help_command:\n commands.append(\"help\")\n return \", \".join([\"`\" + command + \"`\" for command in commands]) + \".\"\n\n\ndef select_welcome_bot_response(human_response_lower: str) -> str:\n # Given the raw (pre-markdown-rendering) content for a private\n # message from the user to Welcome Bot, select the appropriate reply.\n if human_response_lower in [\"app\", \"apps\"]:\n return _(\n \"You can [download](/apps) the [mobile and desktop apps](/apps). \"\n \"Zulip also works great in a browser.\"\n )\n elif human_response_lower == \"profile\":\n return _(\n \"Go to [Profile settings](#settings/profile) \"\n \"to add a [profile picture](/help/change-your-profile-picture) \"\n \"and edit your [profile information](/help/edit-your-profile).\"\n )\n elif human_response_lower == \"theme\":\n return _(\n \"Go to [Display settings](#settings/display-settings) \"\n \"to [switch between the light and dark themes](/help/dark-theme), \"\n \"[pick your favorite emoji theme](/help/emoji-and-emoticons#change-your-emoji-set), \"\n \"[change your language](/help/change-your-language), \"\n \"and make other tweaks to your Zulip experience.\"\n )\n elif human_response_lower in [\"stream\", \"streams\", \"channel\", \"channels\"]:\n return \"\".join(\n [\n _(\n \"In Zulip, streams [determine who gets a message](/help/streams-and-topics). \"\n \"They are similar to channels in other chat apps.\"\n )\n + \"\\n\\n\",\n _(\"[Browse and subscribe to streams](#streams/all).\"),\n ]\n )\n elif human_response_lower in [\"topic\", \"topics\"]:\n return \"\".join(\n [\n _(\n \"In Zulip, topics [tell you what a message is about](/help/streams-and-topics). \"\n \"They are light-weight subjects, very similar to the subject line of an email.\"\n )\n + \"\\n\\n\",\n _(\n \"Check out [Recent conversations](#recent_topics) to see what's happening! \"\n 'You can return to this conversation by clicking \"Private messages\" in the upper left.'\n ),\n ]\n )\n elif human_response_lower in [\"keyboard\", \"shortcuts\", \"keyboard shortcuts\"]:\n return \"\".join(\n [\n _(\n \"Zulip's [keyboard shortcuts](#keyboard-shortcuts) \"\n \"let you navigate the app quickly and efficiently.\"\n )\n + \"\\n\\n\",\n _(\"Press `?` any time to see a [cheat sheet](#keyboard-shortcuts).\"),\n ]\n )\n elif human_response_lower in [\"formatting\", \"message formatting\"]:\n return \"\".join(\n [\n _(\n \"Zulip uses [Markdown](/help/format-your-message-using-markdown), \"\n \"an intuitive format for **bold**, *italics*, bulleted lists, and more. 
\"\n \"Click [here](#message-formatting) for a cheat sheet.\"\n )\n + \"\\n\\n\",\n _(\n \"Check out our [messaging tips](/help/messaging-tips) \"\n \"to learn about emoji reactions, code blocks and much more!\"\n ),\n ]\n )\n elif human_response_lower in [\"help\", \"?\"]:\n return \"\".join(\n [\n _(\"Here are a few messages I understand:\") + \" \",\n bot_commands(no_help_command=True) + \"\\n\\n\",\n _(\n \"Check out our [Getting started guide](/help/getting-started-with-zulip), \"\n \"or browse the [Help center](/help/) to learn more!\"\n ),\n ]\n )\n else:\n return \"\".join(\n [\n _(\n \"I\u2019m sorry, I did not understand your message. Please try one of the following commands:\"\n )\n + \" \",\n bot_commands(),\n ]\n )\n\n\ndef send_welcome_bot_response(send_request: SendMessageRequest) -> None:\n \"\"\"Given the send_request object for a private message from the user\n to welcome-bot, trigger the welcome-bot reply.\"\"\"\n welcome_bot = get_system_bot(settings.WELCOME_BOT, send_request.message.sender.realm_id)\n human_response_lower = send_request.message.content.lower()\n content = select_welcome_bot_response(human_response_lower)\n internal_send_private_message(welcome_bot, send_request.message.sender, content)\n\n\[email protected]\ndef send_initial_realm_messages(realm: Realm) -> None:\n welcome_bot = get_system_bot(settings.WELCOME_BOT, realm.id)\n # Make sure each stream created in the realm creation process has at least one message below\n # Order corresponds to the ordering of the streams on the left sidebar, to make the initial Home\n # view slightly less overwhelming\n content_of_private_streams_topic = (\n _(\"This is a private stream, as indicated by the lock icon next to the stream name.\")\n + \" \"\n + _(\"Private streams are only visible to stream members.\")\n + \"\\n\"\n \"\\n\"\n + _(\n \"To manage this stream, go to [Stream settings]({stream_settings_url}) \"\n \"and click on `{initial_private_stream_name}`.\"\n )\n ).format(\n stream_settings_url=\"#streams/subscribed\",\n initial_private_stream_name=Realm.INITIAL_PRIVATE_STREAM_NAME,\n )\n\n content1_of_topic_demonstration_topic = (\n _(\n \"This is a message on stream #**{default_notification_stream_name}** with the \"\n \"topic `topic demonstration`.\"\n )\n ).format(default_notification_stream_name=Realm.DEFAULT_NOTIFICATION_STREAM_NAME)\n\n content2_of_topic_demonstration_topic = (\n _(\"Topics are a lightweight tool to keep conversations organized.\")\n + \" \"\n + _(\"You can learn more about topics at [Streams and topics]({about_topics_help_url}).\")\n ).format(about_topics_help_url=\"/help/streams-and-topics\")\n\n content_of_swimming_turtles_topic = (\n _(\n \"This is a message on stream #**{default_notification_stream_name}** with the \"\n \"topic `swimming turtles`.\"\n )\n + \"\\n\"\n \"\\n\"\n \"[](/static/images/cute/turtle.png)\"\n \"\\n\"\n \"\\n\"\n + _(\n \"[Start a new topic]({start_topic_help_url}) any time you're not replying to a \\\n previous message.\"\n )\n ).format(\n default_notification_stream_name=Realm.DEFAULT_NOTIFICATION_STREAM_NAME,\n start_topic_help_url=\"/help/start-a-new-topic\",\n )\n\n welcome_messages: List[Dict[str, str]] = [\n {\n \"stream\": Realm.INITIAL_PRIVATE_STREAM_NAME,\n \"topic\": \"private streams\",\n \"content\": content_of_private_streams_topic,\n },\n {\n \"stream\": Realm.DEFAULT_NOTIFICATION_STREAM_NAME,\n \"topic\": \"topic demonstration\",\n \"content\": content1_of_topic_demonstration_topic,\n },\n {\n \"stream\": 
Realm.DEFAULT_NOTIFICATION_STREAM_NAME,\n \"topic\": \"topic demonstration\",\n \"content\": content2_of_topic_demonstration_topic,\n },\n {\n \"stream\": realm.DEFAULT_NOTIFICATION_STREAM_NAME,\n \"topic\": \"swimming turtles\",\n \"content\": content_of_swimming_turtles_topic,\n },\n ]\n\n messages = [\n internal_prep_stream_message_by_name(\n realm,\n welcome_bot,\n message[\"stream\"],\n message[\"topic\"],\n message[\"content\"],\n )\n for message in welcome_messages\n ]\n message_ids = do_send_messages(messages)\n\n # We find the one of our just-sent messages with turtle.png in it,\n # and react to it. This is a bit hacky, but works and is kinda a\n # 1-off thing.\n turtle_message = Message.objects.select_for_update().get(\n id__in=message_ids, content__icontains=\"cute/turtle.png\"\n )\n (emoji_code, reaction_type) = emoji_name_to_emoji_code(realm, \"turtle\")\n do_add_reaction(welcome_bot, turtle_message, \"turtle\", emoji_code, reaction_type)\n", "path": "zerver/lib/onboarding.py"}]} | 4,022 | 123 |
gh_patches_debug_27161 | rasdani/github-patches | git_diff | ytdl-org__youtube-dl-18228 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for nzz.ch
rudolffischer@BueroPC-RF:~$ youtube-dl "http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209" -v
[debug] System config: []
[debug] User config: []
[debug] Command-line args: ['http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209', '-v']
[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2014.12.06.1
[debug] Python version 2.7.6 - Linux-3.13.0-39-generic-x86_64-with-Ubuntu-14.04-trusty
[debug] exe versions: rtmpdump 2.4
[debug] Proxy map: {}
[generic] 30-jahre-herzschmerz-aus-saas-fee-1: Requesting header
WARNING: Falling back on generic information extractor.
[generic] 30-jahre-herzschmerz-aus-saas-fee-1: Downloading webpage
[generic] 30-jahre-herzschmerz-aus-saas-fee-1: Extracting information
ERROR: Unsupported URL: http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/generic.py", line 651, in _real_extract
doc = parse_xml(webpage)
File "/usr/local/bin/youtube-dl/youtube_dl/utils.py", line 1425, in parse_xml
tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1300, in XML
parser.feed(text)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1642, in feed
self._raiseerror(v)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
raise err
ParseError: not well-formed (invalid token): line 2, column 42
Traceback (most recent call last):
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 553, in extract_info
ie_result = ie.extract(url)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 241, in extract
return self._real_extract(url)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/generic.py", line 1044, in _real_extract
raise ExtractorError('Unsupported URL: %s' % url)
ExtractorError: Unsupported URL: http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
rudolffischer@BueroPC-RF:~$
</issue>
<code>
[start of youtube_dl/extractor/nzz.py]
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import re
5
6 from .common import InfoExtractor
7 from ..utils import (
8 extract_attributes,
9 )
10
11
12 class NZZIE(InfoExtractor):
13 _VALID_URL = r'https?://(?:www\.)?nzz\.ch/(?:[^/]+/)*[^/?#]+-ld\.(?P<id>\d+)'
14 _TEST = {
15 'url': 'http://www.nzz.ch/zuerich/gymizyte/gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153',
16 'info_dict': {
17 'id': '9153',
18 },
19 'playlist_mincount': 6,
20 }
21
22 def _real_extract(self, url):
23 page_id = self._match_id(url)
24 webpage = self._download_webpage(url, page_id)
25
26 entries = []
27 for player_element in re.findall(r'(<[^>]+class="kalturaPlayer"[^>]*>)', webpage):
28 player_params = extract_attributes(player_element)
29 if player_params.get('data-type') not in ('kaltura_singleArticle',):
30 self.report_warning('Unsupported player type')
31 continue
32 entry_id = player_params['data-id']
33 entries.append(self.url_result(
34 'kaltura:1750922:' + entry_id, 'Kaltura', entry_id))
35
36 return self.playlist_result(entries, page_id)
37
[end of youtube_dl/extractor/nzz.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/youtube_dl/extractor/nzz.py b/youtube_dl/extractor/nzz.py
--- a/youtube_dl/extractor/nzz.py
+++ b/youtube_dl/extractor/nzz.py
@@ -11,20 +11,27 @@
class NZZIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?nzz\.ch/(?:[^/]+/)*[^/?#]+-ld\.(?P<id>\d+)'
- _TEST = {
+ _TESTS = [{
'url': 'http://www.nzz.ch/zuerich/gymizyte/gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153',
'info_dict': {
'id': '9153',
},
'playlist_mincount': 6,
- }
+ }, {
+ 'url': 'https://www.nzz.ch/video/nzz-standpunkte/cvp-auf-der-suche-nach-dem-mass-der-mitte-ld.1368112',
+ 'info_dict': {
+ 'id': '1368112',
+ },
+ 'playlist_count': 1,
+ }]
def _real_extract(self, url):
page_id = self._match_id(url)
webpage = self._download_webpage(url, page_id)
entries = []
- for player_element in re.findall(r'(<[^>]+class="kalturaPlayer"[^>]*>)', webpage):
+ for player_element in re.findall(
+ r'(<[^>]+class="kalturaPlayer[^"]*"[^>]*>)', webpage):
player_params = extract_attributes(player_element)
if player_params.get('data-type') not in ('kaltura_singleArticle',):
self.report_warning('Unsupported player type')
| {"golden_diff": "diff --git a/youtube_dl/extractor/nzz.py b/youtube_dl/extractor/nzz.py\n--- a/youtube_dl/extractor/nzz.py\n+++ b/youtube_dl/extractor/nzz.py\n@@ -11,20 +11,27 @@\n \n class NZZIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?nzz\\.ch/(?:[^/]+/)*[^/?#]+-ld\\.(?P<id>\\d+)'\n- _TEST = {\n+ _TESTS = [{\n 'url': 'http://www.nzz.ch/zuerich/gymizyte/gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153',\n 'info_dict': {\n 'id': '9153',\n },\n 'playlist_mincount': 6,\n- }\n+ }, {\n+ 'url': 'https://www.nzz.ch/video/nzz-standpunkte/cvp-auf-der-suche-nach-dem-mass-der-mitte-ld.1368112',\n+ 'info_dict': {\n+ 'id': '1368112',\n+ },\n+ 'playlist_count': 1,\n+ }]\n \n def _real_extract(self, url):\n page_id = self._match_id(url)\n webpage = self._download_webpage(url, page_id)\n \n entries = []\n- for player_element in re.findall(r'(<[^>]+class=\"kalturaPlayer\"[^>]*>)', webpage):\n+ for player_element in re.findall(\n+ r'(<[^>]+class=\"kalturaPlayer[^\"]*\"[^>]*>)', webpage):\n player_params = extract_attributes(player_element)\n if player_params.get('data-type') not in ('kaltura_singleArticle',):\n self.report_warning('Unsupported player type')\n", "issue": "Add support for nzz.ch\nrudolffischer@BueroPC-RF:~$ youtube-dl \"http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209\" -v\n[debug] System config: []\n[debug] User config: []\n[debug] Command-line args: ['http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209', '-v']\n[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8\n[debug] youtube-dl version 2014.12.06.1\n[debug] Python version 2.7.6 - Linux-3.13.0-39-generic-x86_64-with-Ubuntu-14.04-trusty\n[debug] exe versions: rtmpdump 2.4\n[debug] Proxy map: {}\n[generic] 30-jahre-herzschmerz-aus-saas-fee-1: Requesting header\nWARNING: Falling back on generic information extractor.\n[generic] 30-jahre-herzschmerz-aus-saas-fee-1: Downloading webpage\n[generic] 30-jahre-herzschmerz-aus-saas-fee-1: Extracting information\nERROR: Unsupported URL: http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. 
Be sure to call youtube-dl with the --verbose flag and include its complete output.\nTraceback (most recent call last):\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/generic.py\", line 651, in _real_extract\n doc = parse_xml(webpage)\n File \"/usr/local/bin/youtube-dl/youtube_dl/utils.py\", line 1425, in parse_xml\n tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs)\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1300, in XML\n parser.feed(text)\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1642, in feed\n self._raiseerror(v)\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1506, in _raiseerror\n raise err\nParseError: not well-formed (invalid token): line 2, column 42\nTraceback (most recent call last):\n File \"/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 553, in extract_info\n ie_result = ie.extract(url)\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py\", line 241, in extract\n return self._real_extract(url)\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/generic.py\", line 1044, in _real_extract\n raise ExtractorError('Unsupported URL: %s' % url)\nExtractorError: Unsupported URL: http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.\n\nrudolffischer@BueroPC-RF:~$ \n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n extract_attributes,\n)\n\n\nclass NZZIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?nzz\\.ch/(?:[^/]+/)*[^/?#]+-ld\\.(?P<id>\\d+)'\n _TEST = {\n 'url': 'http://www.nzz.ch/zuerich/gymizyte/gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153',\n 'info_dict': {\n 'id': '9153',\n },\n 'playlist_mincount': 6,\n }\n\n def _real_extract(self, url):\n page_id = self._match_id(url)\n webpage = self._download_webpage(url, page_id)\n\n entries = []\n for player_element in re.findall(r'(<[^>]+class=\"kalturaPlayer\"[^>]*>)', webpage):\n player_params = extract_attributes(player_element)\n if player_params.get('data-type') not in ('kaltura_singleArticle',):\n self.report_warning('Unsupported player type')\n continue\n entry_id = player_params['data-id']\n entries.append(self.url_result(\n 'kaltura:1750922:' + entry_id, 'Kaltura', entry_id))\n\n return self.playlist_result(entries, page_id)\n", "path": "youtube_dl/extractor/nzz.py"}]} | 1,816 | 420 |
gh_patches_debug_25079 | rasdani/github-patches | git_diff | Kinto__kinto-630 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enabling the flush endpoint through env vars does not seem to work
I'm running Kinto + postgres with docker-compose (using the example docker-compose.yml in the repo).
Adding `KINTO_FLUSH_ENDPOINT_ENABLED` to the environment section in docker-compose.yml does not enable the flush endpoint for me. I instead had to add `kinto.flush_endpoint_enabled = true` to a custom ini file, that worked.
Can the flush endpoint be enabled through an env var like this?
</issue>
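For orientation, the report assumes that an environment variable such as `KINTO_FLUSH_ENDPOINT_ENABLED` should be picked up as the `flush_endpoint_enabled` setting. Below is a minimal sketch of that kind of env-var fallback; the `KINTO_` prefix handling and the boolean parsing shown here are illustrative assumptions, not the actual `kinto.core` implementation.

```python
# Illustrative sketch only: how an env var like KINTO_FLUSH_ENDPOINT_ENABLED
# could be mapped onto a boolean setting. Not the real kinto.core code.
import os


def setting_from_env(name, default=None, prefix="KINTO_"):
    """Return the env-var override for a setting, falling back to `default`."""
    raw = os.environ.get(prefix + name.upper())
    if raw is None:
        return default
    return raw.strip().lower() in ("true", "1", "yes", "on")


flush_enabled = setting_from_env("flush_endpoint_enabled", default=False)
```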
<code>
[start of kinto/__init__.py]
1 import pkg_resources
2 import logging
3
4 import kinto.core
5 from pyramid.config import Configurator
6 from pyramid.settings import asbool
7 from pyramid.security import Authenticated
8
9 from kinto.authorization import RouteFactory
10
11 # Module version, as defined in PEP-0396.
12 __version__ = pkg_resources.get_distribution(__package__).version
13
14 # Implemented HTTP API Version
15 HTTP_API_VERSION = '1.5'
16
17 # Main kinto logger
18 logger = logging.getLogger(__name__)
19
20
21 DEFAULT_SETTINGS = {
22 'retry_after_seconds': 3,
23 'cache_backend': 'kinto.core.cache.memory',
24 'permission_backend': 'kinto.core.permission.memory',
25 'storage_backend': 'kinto.core.storage.memory',
26 'project_docs': 'https://kinto.readthedocs.io/',
27 'bucket_create_principals': Authenticated,
28 'multiauth.authorization_policy': (
29 'kinto.authorization.AuthorizationPolicy'),
30 'experimental_collection_schema_validation': 'False',
31 'http_api_version': HTTP_API_VERSION
32 }
33
34
35 def main(global_config, config=None, **settings):
36 if not config:
37 config = Configurator(settings=settings, root_factory=RouteFactory)
38
39 # Force project name, since it determines settings prefix.
40 config.add_settings({'kinto.project_name': 'kinto'})
41
42 kinto.core.initialize(config,
43 version=__version__,
44 default_settings=DEFAULT_SETTINGS)
45
46 settings = config.get_settings()
47
48 # Expose capability
49 schema_enabled = asbool(
50 settings['experimental_collection_schema_validation']
51 )
52 if schema_enabled:
53 config.add_api_capability(
54 "schema",
55 description="Validates collection records with JSON schemas.",
56 url="http://kinto.readthedocs.io/en/latest/api/1.x/"
57 "collections.html#collection-json-schema")
58
59 # Scan Kinto views.
60 kwargs = {}
61 flush_enabled = asbool(settings.get('flush_endpoint_enabled'))
62
63 if flush_enabled:
64 config.add_api_capability(
65 "flush_endpoint",
66 description="The __flush__ endpoint can be used to remove all "
67 "data from all backends.",
68 url="http://kinto.readthedocs.io/en/latest/configuration/"
69 "settings.html#activating-the-flush-endpoint"
70 )
71 else:
72 kwargs['ignore'] = 'kinto.views.flush'
73 config.scan("kinto.views", **kwargs)
74
75 app = config.make_wsgi_app()
76
77 # Install middleware (idempotent if disabled)
78 return kinto.core.install_middlewares(app, settings)
79
[end of kinto/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/__init__.py b/kinto/__init__.py
--- a/kinto/__init__.py
+++ b/kinto/__init__.py
@@ -19,6 +19,7 @@
DEFAULT_SETTINGS = {
+ 'flush_endpoint_enabled': False,
'retry_after_seconds': 3,
'cache_backend': 'kinto.core.cache.memory',
'permission_backend': 'kinto.core.permission.memory',
@@ -58,18 +59,18 @@
# Scan Kinto views.
kwargs = {}
- flush_enabled = asbool(settings.get('flush_endpoint_enabled'))
+ flush_enabled = asbool(settings.get('flush_endpoint_enabled'))
if flush_enabled:
config.add_api_capability(
"flush_endpoint",
description="The __flush__ endpoint can be used to remove all "
"data from all backends.",
url="http://kinto.readthedocs.io/en/latest/configuration/"
- "settings.html#activating-the-flush-endpoint"
- )
+ "settings.html#activating-the-flush-endpoint")
else:
kwargs['ignore'] = 'kinto.views.flush'
+
config.scan("kinto.views", **kwargs)
app = config.make_wsgi_app()
| {"golden_diff": "diff --git a/kinto/__init__.py b/kinto/__init__.py\n--- a/kinto/__init__.py\n+++ b/kinto/__init__.py\n@@ -19,6 +19,7 @@\n \n \n DEFAULT_SETTINGS = {\n+ 'flush_endpoint_enabled': False,\n 'retry_after_seconds': 3,\n 'cache_backend': 'kinto.core.cache.memory',\n 'permission_backend': 'kinto.core.permission.memory',\n@@ -58,18 +59,18 @@\n \n # Scan Kinto views.\n kwargs = {}\n- flush_enabled = asbool(settings.get('flush_endpoint_enabled'))\n \n+ flush_enabled = asbool(settings.get('flush_endpoint_enabled'))\n if flush_enabled:\n config.add_api_capability(\n \"flush_endpoint\",\n description=\"The __flush__ endpoint can be used to remove all \"\n \"data from all backends.\",\n url=\"http://kinto.readthedocs.io/en/latest/configuration/\"\n- \"settings.html#activating-the-flush-endpoint\"\n- )\n+ \"settings.html#activating-the-flush-endpoint\")\n else:\n kwargs['ignore'] = 'kinto.views.flush'\n+\n config.scan(\"kinto.views\", **kwargs)\n \n app = config.make_wsgi_app()\n", "issue": "Enabling the flush endpoint through env vars does not seem to work\nI'm running Kinto + postgres with docker-compose (using the example docker-compose.yml in the repo). \n\nAdding `KINTO_FLUSH_ENDPOINT_ENABLED` to the environment section in docker-compose.yml does not enable the flush endpoint for me. I instead had to add `kinto.flush_endpoint_enabled = true` to a custom ini file, that worked.\n\nCan the flush endpoint be enabled through an env var like this?\n\n", "before_files": [{"content": "import pkg_resources\nimport logging\n\nimport kinto.core\nfrom pyramid.config import Configurator\nfrom pyramid.settings import asbool\nfrom pyramid.security import Authenticated\n\nfrom kinto.authorization import RouteFactory\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution(__package__).version\n\n# Implemented HTTP API Version\nHTTP_API_VERSION = '1.5'\n\n# Main kinto logger\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_SETTINGS = {\n 'retry_after_seconds': 3,\n 'cache_backend': 'kinto.core.cache.memory',\n 'permission_backend': 'kinto.core.permission.memory',\n 'storage_backend': 'kinto.core.storage.memory',\n 'project_docs': 'https://kinto.readthedocs.io/',\n 'bucket_create_principals': Authenticated,\n 'multiauth.authorization_policy': (\n 'kinto.authorization.AuthorizationPolicy'),\n 'experimental_collection_schema_validation': 'False',\n 'http_api_version': HTTP_API_VERSION\n}\n\n\ndef main(global_config, config=None, **settings):\n if not config:\n config = Configurator(settings=settings, root_factory=RouteFactory)\n\n # Force project name, since it determines settings prefix.\n config.add_settings({'kinto.project_name': 'kinto'})\n\n kinto.core.initialize(config,\n version=__version__,\n default_settings=DEFAULT_SETTINGS)\n\n settings = config.get_settings()\n\n # Expose capability\n schema_enabled = asbool(\n settings['experimental_collection_schema_validation']\n )\n if schema_enabled:\n config.add_api_capability(\n \"schema\",\n description=\"Validates collection records with JSON schemas.\",\n url=\"http://kinto.readthedocs.io/en/latest/api/1.x/\"\n \"collections.html#collection-json-schema\")\n\n # Scan Kinto views.\n kwargs = {}\n flush_enabled = asbool(settings.get('flush_endpoint_enabled'))\n\n if flush_enabled:\n config.add_api_capability(\n \"flush_endpoint\",\n description=\"The __flush__ endpoint can be used to remove all \"\n \"data from all backends.\",\n url=\"http://kinto.readthedocs.io/en/latest/configuration/\"\n 
\"settings.html#activating-the-flush-endpoint\"\n )\n else:\n kwargs['ignore'] = 'kinto.views.flush'\n config.scan(\"kinto.views\", **kwargs)\n\n app = config.make_wsgi_app()\n\n # Install middleware (idempotent if disabled)\n return kinto.core.install_middlewares(app, settings)\n", "path": "kinto/__init__.py"}]} | 1,320 | 272 |
gh_patches_debug_42551 | rasdani/github-patches | git_diff | feast-dev__feast-2348 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Docs] Explain DynamoDB online_write_batch uses a batch_writer
## Expected Behavior
`DynamoDBOnlineStore` method `online_write_batch` uses a [BatchWriter](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/dynamodb.html#batch-writing) under the hood.
User documentation should explain the advantages of using it, including
> automatically handle buffering and sending items in batches. In addition, the batch writer will also automatically handle any unprocessed items and resend them as needed.
</issue>
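For context, boto3's `Table.batch_writer()` is a documented context manager that buffers `put_item` calls into `BatchWriteItem` requests (up to 25 items per request) and automatically retries unprocessed items. A standalone sketch of that usage pattern follows; the table name and item layout are made up for illustration and are not taken from this repository.

```python
# Standalone sketch of boto3's batch_writer, for context on the issue above.
# The table name and item shape are illustrative assumptions.
import boto3

dynamodb = boto3.resource("dynamodb", region_name="us-west-2")
table = dynamodb.Table("example_project.example_feature_view")

# batch_writer buffers puts and sends them in BatchWriteItem calls of up to
# 25 items; remaining buffered items are flushed and unprocessed items are
# resent automatically when the context manager exits.
with table.batch_writer() as batch:
    for i in range(100):
        batch.put_item(Item={"entity_id": str(i), "event_ts": "2021-01-01T00:00:00"})
```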
<code>
[start of sdk/python/feast/infra/online_stores/dynamodb.py]
1 # Copyright 2021 The Feast Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import logging
15 from datetime import datetime
16 from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
17
18 from pydantic import StrictStr
19 from pydantic.typing import Literal
20
21 from feast import Entity, FeatureView, utils
22 from feast.infra.infra_object import DYNAMODB_INFRA_OBJECT_CLASS_TYPE, InfraObject
23 from feast.infra.online_stores.helpers import compute_entity_id
24 from feast.infra.online_stores.online_store import OnlineStore
25 from feast.protos.feast.core.DynamoDBTable_pb2 import (
26 DynamoDBTable as DynamoDBTableProto,
27 )
28 from feast.protos.feast.core.InfraObject_pb2 import InfraObject as InfraObjectProto
29 from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
30 from feast.protos.feast.types.Value_pb2 import Value as ValueProto
31 from feast.repo_config import FeastConfigBaseModel, RepoConfig
32 from feast.usage import log_exceptions_and_usage, tracing_span
33
34 try:
35 import boto3
36 from botocore.exceptions import ClientError
37 except ImportError as e:
38 from feast.errors import FeastExtrasDependencyImportError
39
40 raise FeastExtrasDependencyImportError("aws", str(e))
41
42
43 logger = logging.getLogger(__name__)
44
45
46 class DynamoDBOnlineStoreConfig(FeastConfigBaseModel):
47 """Online store config for DynamoDB store"""
48
49 type: Literal["dynamodb"] = "dynamodb"
50 """Online store type selector"""
51
52 region: StrictStr
53 """ AWS Region Name """
54
55
56 class DynamoDBOnlineStore(OnlineStore):
57 """
58 Online feature store for AWS DynamoDB.
59 """
60
61 _dynamodb_client = None
62 _dynamodb_resource = None
63
64 @log_exceptions_and_usage(online_store="dynamodb")
65 def update(
66 self,
67 config: RepoConfig,
68 tables_to_delete: Sequence[FeatureView],
69 tables_to_keep: Sequence[FeatureView],
70 entities_to_delete: Sequence[Entity],
71 entities_to_keep: Sequence[Entity],
72 partial: bool,
73 ):
74 online_config = config.online_store
75 assert isinstance(online_config, DynamoDBOnlineStoreConfig)
76 dynamodb_client = self._get_dynamodb_client(online_config.region)
77 dynamodb_resource = self._get_dynamodb_resource(online_config.region)
78
79 for table_instance in tables_to_keep:
80 try:
81 dynamodb_resource.create_table(
82 TableName=_get_table_name(config, table_instance),
83 KeySchema=[{"AttributeName": "entity_id", "KeyType": "HASH"}],
84 AttributeDefinitions=[
85 {"AttributeName": "entity_id", "AttributeType": "S"}
86 ],
87 BillingMode="PAY_PER_REQUEST",
88 )
89 except ClientError as ce:
90 # If the table creation fails with ResourceInUseException,
91 # it means the table already exists or is being created.
92 # Otherwise, re-raise the exception
93 if ce.response["Error"]["Code"] != "ResourceInUseException":
94 raise
95
96 for table_instance in tables_to_keep:
97 dynamodb_client.get_waiter("table_exists").wait(
98 TableName=_get_table_name(config, table_instance)
99 )
100
101 for table_to_delete in tables_to_delete:
102 _delete_table_idempotent(
103 dynamodb_resource, _get_table_name(config, table_to_delete)
104 )
105
106 def teardown(
107 self,
108 config: RepoConfig,
109 tables: Sequence[FeatureView],
110 entities: Sequence[Entity],
111 ):
112 online_config = config.online_store
113 assert isinstance(online_config, DynamoDBOnlineStoreConfig)
114 dynamodb_resource = self._get_dynamodb_resource(online_config.region)
115
116 for table in tables:
117 _delete_table_idempotent(dynamodb_resource, _get_table_name(config, table))
118
119 @log_exceptions_and_usage(online_store="dynamodb")
120 def online_write_batch(
121 self,
122 config: RepoConfig,
123 table: FeatureView,
124 data: List[
125 Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]
126 ],
127 progress: Optional[Callable[[int], Any]],
128 ) -> None:
129 online_config = config.online_store
130 assert isinstance(online_config, DynamoDBOnlineStoreConfig)
131 dynamodb_resource = self._get_dynamodb_resource(online_config.region)
132
133 table_instance = dynamodb_resource.Table(_get_table_name(config, table))
134 with table_instance.batch_writer() as batch:
135 for entity_key, features, timestamp, created_ts in data:
136 entity_id = compute_entity_id(entity_key)
137 batch.put_item(
138 Item={
139 "entity_id": entity_id, # PartitionKey
140 "event_ts": str(utils.make_tzaware(timestamp)),
141 "values": {
142 k: v.SerializeToString()
143 for k, v in features.items() # Serialized Features
144 },
145 }
146 )
147 if progress:
148 progress(1)
149
150 @log_exceptions_and_usage(online_store="dynamodb")
151 def online_read(
152 self,
153 config: RepoConfig,
154 table: FeatureView,
155 entity_keys: List[EntityKeyProto],
156 requested_features: Optional[List[str]] = None,
157 ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:
158 online_config = config.online_store
159 assert isinstance(online_config, DynamoDBOnlineStoreConfig)
160 dynamodb_resource = self._get_dynamodb_resource(online_config.region)
161
162 result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []
163 for entity_key in entity_keys:
164 table_instance = dynamodb_resource.Table(_get_table_name(config, table))
165 entity_id = compute_entity_id(entity_key)
166 with tracing_span(name="remote_call"):
167 response = table_instance.get_item(Key={"entity_id": entity_id})
168 value = response.get("Item")
169
170 if value is not None:
171 res = {}
172 for feature_name, value_bin in value["values"].items():
173 val = ValueProto()
174 val.ParseFromString(value_bin.value)
175 res[feature_name] = val
176 result.append((datetime.fromisoformat(value["event_ts"]), res))
177 else:
178 result.append((None, None))
179 return result
180
181 def _get_dynamodb_client(self, region: str):
182 if self._dynamodb_client is None:
183 self._dynamodb_client = _initialize_dynamodb_client(region)
184 return self._dynamodb_client
185
186 def _get_dynamodb_resource(self, region: str):
187 if self._dynamodb_resource is None:
188 self._dynamodb_resource = _initialize_dynamodb_resource(region)
189 return self._dynamodb_resource
190
191
192 def _initialize_dynamodb_client(region: str):
193 return boto3.client("dynamodb", region_name=region)
194
195
196 def _initialize_dynamodb_resource(region: str):
197 return boto3.resource("dynamodb", region_name=region)
198
199
200 def _get_table_name(config: RepoConfig, table: FeatureView) -> str:
201 return f"{config.project}.{table.name}"
202
203
204 def _delete_table_idempotent(
205 dynamodb_resource, table_name: str,
206 ):
207 try:
208 table = dynamodb_resource.Table(table_name)
209 table.delete()
210 logger.info(f"Dynamo table {table_name} was deleted")
211 except ClientError as ce:
212 # If the table deletion fails with ResourceNotFoundException,
213 # it means the table has already been deleted.
214 # Otherwise, re-raise the exception
215 if ce.response["Error"]["Code"] != "ResourceNotFoundException":
216 raise
217 else:
218 logger.warning(f"Trying to delete table that doesn't exist: {table_name}")
219
220
221 class DynamoDBTable(InfraObject):
222 """
223 A DynamoDB table managed by Feast.
224
225 Attributes:
226 name: The name of the table.
227 region: The region of the table.
228 """
229
230 region: str
231
232 def __init__(self, name: str, region: str):
233 super().__init__(name)
234 self.region = region
235
236 def to_infra_object_proto(self) -> InfraObjectProto:
237 dynamodb_table_proto = self.to_proto()
238 return InfraObjectProto(
239 infra_object_class_type=DYNAMODB_INFRA_OBJECT_CLASS_TYPE,
240 dynamodb_table=dynamodb_table_proto,
241 )
242
243 def to_proto(self) -> Any:
244 dynamodb_table_proto = DynamoDBTableProto()
245 dynamodb_table_proto.name = self.name
246 dynamodb_table_proto.region = self.region
247 return dynamodb_table_proto
248
249 @staticmethod
250 def from_infra_object_proto(infra_object_proto: InfraObjectProto) -> Any:
251 return DynamoDBTable(
252 name=infra_object_proto.dynamodb_table.name,
253 region=infra_object_proto.dynamodb_table.region,
254 )
255
256 @staticmethod
257 def from_proto(dynamodb_table_proto: DynamoDBTableProto) -> Any:
258 return DynamoDBTable(
259 name=dynamodb_table_proto.name, region=dynamodb_table_proto.region,
260 )
261
262 def update(self):
263 dynamodb_client = _initialize_dynamodb_client(region=self.region)
264 dynamodb_resource = _initialize_dynamodb_resource(region=self.region)
265
266 try:
267 dynamodb_resource.create_table(
268 TableName=f"{self.name}",
269 KeySchema=[{"AttributeName": "entity_id", "KeyType": "HASH"}],
270 AttributeDefinitions=[
271 {"AttributeName": "entity_id", "AttributeType": "S"}
272 ],
273 BillingMode="PAY_PER_REQUEST",
274 )
275 except ClientError as ce:
276 # If the table creation fails with ResourceInUseException,
277 # it means the table already exists or is being created.
278 # Otherwise, re-raise the exception
279 if ce.response["Error"]["Code"] != "ResourceInUseException":
280 raise
281
282 dynamodb_client.get_waiter("table_exists").wait(TableName=f"{self.name}")
283
284 def teardown(self):
285 dynamodb_resource = _initialize_dynamodb_resource(region=self.region)
286 _delete_table_idempotent(dynamodb_resource, self.name)
287
[end of sdk/python/feast/infra/online_stores/dynamodb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sdk/python/feast/infra/online_stores/dynamodb.py b/sdk/python/feast/infra/online_stores/dynamodb.py
--- a/sdk/python/feast/infra/online_stores/dynamodb.py
+++ b/sdk/python/feast/infra/online_stores/dynamodb.py
@@ -56,6 +56,10 @@
class DynamoDBOnlineStore(OnlineStore):
"""
Online feature store for AWS DynamoDB.
+
+ Attributes:
+ _dynamodb_client: Boto3 DynamoDB client.
+ _dynamodb_resource: Boto3 DynamoDB resource.
"""
_dynamodb_client = None
@@ -71,6 +75,14 @@
entities_to_keep: Sequence[Entity],
partial: bool,
):
+ """
+ Update tables from the DynamoDB Online Store.
+
+ Args:
+ config: The RepoConfig for the current FeatureStore.
+ tables_to_delete: Tables to delete from the DynamoDB Online Store.
+ tables_to_keep: Tables to keep in the DynamoDB Online Store.
+ """
online_config = config.online_store
assert isinstance(online_config, DynamoDBOnlineStoreConfig)
dynamodb_client = self._get_dynamodb_client(online_config.region)
@@ -109,6 +121,13 @@
tables: Sequence[FeatureView],
entities: Sequence[Entity],
):
+ """
+ Delete tables from the DynamoDB Online Store.
+
+ Args:
+ config: The RepoConfig for the current FeatureStore.
+ tables: Tables to delete from the feature repo.
+ """
online_config = config.online_store
assert isinstance(online_config, DynamoDBOnlineStoreConfig)
dynamodb_resource = self._get_dynamodb_resource(online_config.region)
@@ -126,6 +145,21 @@
],
progress: Optional[Callable[[int], Any]],
) -> None:
+ """
+ Write a batch of feature rows to online DynamoDB store.
+
+ Note: This method applies a ``batch_writer`` to automatically handle any unprocessed items
+ and resend them as needed, this is useful if you're loading a lot of data at a time.
+
+ Args:
+ config: The RepoConfig for the current FeatureStore.
+ table: Feast FeatureView.
+ data: a list of quadruplets containing Feature data. Each quadruplet contains an Entity Key,
+ a dict containing feature values, an event timestamp for the row, and
+ the created timestamp for the row if it exists.
+ progress: Optional function to be called once every mini-batch of rows is written to
+ the online store. Can be used to display progress.
+ """
online_config = config.online_store
assert isinstance(online_config, DynamoDBOnlineStoreConfig)
dynamodb_resource = self._get_dynamodb_resource(online_config.region)
@@ -155,6 +189,17 @@
entity_keys: List[EntityKeyProto],
requested_features: Optional[List[str]] = None,
) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:
+ """
+ Retrieve feature values from the online DynamoDB store.
+
+ Note: This method is currently not optimized to retrieve a lot of data at a time
+ as it does sequential gets from the DynamoDB table.
+
+ Args:
+ config: The RepoConfig for the current FeatureStore.
+ table: Feast FeatureView.
+ entity_keys: a list of entity keys that should be read from the FeatureStore.
+ """
online_config = config.online_store
assert isinstance(online_config, DynamoDBOnlineStoreConfig)
dynamodb_resource = self._get_dynamodb_resource(online_config.region)
| {"golden_diff": "diff --git a/sdk/python/feast/infra/online_stores/dynamodb.py b/sdk/python/feast/infra/online_stores/dynamodb.py\n--- a/sdk/python/feast/infra/online_stores/dynamodb.py\n+++ b/sdk/python/feast/infra/online_stores/dynamodb.py\n@@ -56,6 +56,10 @@\n class DynamoDBOnlineStore(OnlineStore):\n \"\"\"\n Online feature store for AWS DynamoDB.\n+\n+ Attributes:\n+ _dynamodb_client: Boto3 DynamoDB client.\n+ _dynamodb_resource: Boto3 DynamoDB resource.\n \"\"\"\n \n _dynamodb_client = None\n@@ -71,6 +75,14 @@\n entities_to_keep: Sequence[Entity],\n partial: bool,\n ):\n+ \"\"\"\n+ Update tables from the DynamoDB Online Store.\n+\n+ Args:\n+ config: The RepoConfig for the current FeatureStore.\n+ tables_to_delete: Tables to delete from the DynamoDB Online Store.\n+ tables_to_keep: Tables to keep in the DynamoDB Online Store.\n+ \"\"\"\n online_config = config.online_store\n assert isinstance(online_config, DynamoDBOnlineStoreConfig)\n dynamodb_client = self._get_dynamodb_client(online_config.region)\n@@ -109,6 +121,13 @@\n tables: Sequence[FeatureView],\n entities: Sequence[Entity],\n ):\n+ \"\"\"\n+ Delete tables from the DynamoDB Online Store.\n+\n+ Args:\n+ config: The RepoConfig for the current FeatureStore.\n+ tables: Tables to delete from the feature repo.\n+ \"\"\"\n online_config = config.online_store\n assert isinstance(online_config, DynamoDBOnlineStoreConfig)\n dynamodb_resource = self._get_dynamodb_resource(online_config.region)\n@@ -126,6 +145,21 @@\n ],\n progress: Optional[Callable[[int], Any]],\n ) -> None:\n+ \"\"\"\n+ Write a batch of feature rows to online DynamoDB store.\n+\n+ Note: This method applies a ``batch_writer`` to automatically handle any unprocessed items\n+ and resend them as needed, this is useful if you're loading a lot of data at a time.\n+\n+ Args:\n+ config: The RepoConfig for the current FeatureStore.\n+ table: Feast FeatureView.\n+ data: a list of quadruplets containing Feature data. Each quadruplet contains an Entity Key,\n+ a dict containing feature values, an event timestamp for the row, and\n+ the created timestamp for the row if it exists.\n+ progress: Optional function to be called once every mini-batch of rows is written to\n+ the online store. Can be used to display progress.\n+ \"\"\"\n online_config = config.online_store\n assert isinstance(online_config, DynamoDBOnlineStoreConfig)\n dynamodb_resource = self._get_dynamodb_resource(online_config.region)\n@@ -155,6 +189,17 @@\n entity_keys: List[EntityKeyProto],\n requested_features: Optional[List[str]] = None,\n ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:\n+ \"\"\"\n+ Retrieve feature values from the online DynamoDB store.\n+\n+ Note: This method is currently not optimized to retrieve a lot of data at a time\n+ as it does sequential gets from the DynamoDB table.\n+\n+ Args:\n+ config: The RepoConfig for the current FeatureStore.\n+ table: Feast FeatureView.\n+ entity_keys: a list of entity keys that should be read from the FeatureStore.\n+ \"\"\"\n online_config = config.online_store\n assert isinstance(online_config, DynamoDBOnlineStoreConfig)\n dynamodb_resource = self._get_dynamodb_resource(online_config.region)\n", "issue": "[Docs] Explain DynamoDB online_write_batch uses a batch_writer\n## Expected Behavior \r\n\r\n`DynamoDBOnlineStore` method `online_write_batch` uses a [BatchWriter](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/dynamodb.html#batch-writing) under the hood. 
\r\n\r\nUser documentation should explain the advantages of using it, including \r\n\r\n> automatically handle buffering and sending items in batches. In addition, the batch writer will also automatically handle any unprocessed items and resend them as needed. \n", "before_files": [{"content": "# Copyright 2021 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nfrom datetime import datetime\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Tuple\n\nfrom pydantic import StrictStr\nfrom pydantic.typing import Literal\n\nfrom feast import Entity, FeatureView, utils\nfrom feast.infra.infra_object import DYNAMODB_INFRA_OBJECT_CLASS_TYPE, InfraObject\nfrom feast.infra.online_stores.helpers import compute_entity_id\nfrom feast.infra.online_stores.online_store import OnlineStore\nfrom feast.protos.feast.core.DynamoDBTable_pb2 import (\n DynamoDBTable as DynamoDBTableProto,\n)\nfrom feast.protos.feast.core.InfraObject_pb2 import InfraObject as InfraObjectProto\nfrom feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto\nfrom feast.protos.feast.types.Value_pb2 import Value as ValueProto\nfrom feast.repo_config import FeastConfigBaseModel, RepoConfig\nfrom feast.usage import log_exceptions_and_usage, tracing_span\n\ntry:\n import boto3\n from botocore.exceptions import ClientError\nexcept ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError\n\n raise FeastExtrasDependencyImportError(\"aws\", str(e))\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass DynamoDBOnlineStoreConfig(FeastConfigBaseModel):\n \"\"\"Online store config for DynamoDB store\"\"\"\n\n type: Literal[\"dynamodb\"] = \"dynamodb\"\n \"\"\"Online store type selector\"\"\"\n\n region: StrictStr\n \"\"\" AWS Region Name \"\"\"\n\n\nclass DynamoDBOnlineStore(OnlineStore):\n \"\"\"\n Online feature store for AWS DynamoDB.\n \"\"\"\n\n _dynamodb_client = None\n _dynamodb_resource = None\n\n @log_exceptions_and_usage(online_store=\"dynamodb\")\n def update(\n self,\n config: RepoConfig,\n tables_to_delete: Sequence[FeatureView],\n tables_to_keep: Sequence[FeatureView],\n entities_to_delete: Sequence[Entity],\n entities_to_keep: Sequence[Entity],\n partial: bool,\n ):\n online_config = config.online_store\n assert isinstance(online_config, DynamoDBOnlineStoreConfig)\n dynamodb_client = self._get_dynamodb_client(online_config.region)\n dynamodb_resource = self._get_dynamodb_resource(online_config.region)\n\n for table_instance in tables_to_keep:\n try:\n dynamodb_resource.create_table(\n TableName=_get_table_name(config, table_instance),\n KeySchema=[{\"AttributeName\": \"entity_id\", \"KeyType\": \"HASH\"}],\n AttributeDefinitions=[\n {\"AttributeName\": \"entity_id\", \"AttributeType\": \"S\"}\n ],\n BillingMode=\"PAY_PER_REQUEST\",\n )\n except ClientError as ce:\n # If the table creation fails with ResourceInUseException,\n # it means the table already exists or is being created.\n # Otherwise, re-raise the exception\n if 
ce.response[\"Error\"][\"Code\"] != \"ResourceInUseException\":\n raise\n\n for table_instance in tables_to_keep:\n dynamodb_client.get_waiter(\"table_exists\").wait(\n TableName=_get_table_name(config, table_instance)\n )\n\n for table_to_delete in tables_to_delete:\n _delete_table_idempotent(\n dynamodb_resource, _get_table_name(config, table_to_delete)\n )\n\n def teardown(\n self,\n config: RepoConfig,\n tables: Sequence[FeatureView],\n entities: Sequence[Entity],\n ):\n online_config = config.online_store\n assert isinstance(online_config, DynamoDBOnlineStoreConfig)\n dynamodb_resource = self._get_dynamodb_resource(online_config.region)\n\n for table in tables:\n _delete_table_idempotent(dynamodb_resource, _get_table_name(config, table))\n\n @log_exceptions_and_usage(online_store=\"dynamodb\")\n def online_write_batch(\n self,\n config: RepoConfig,\n table: FeatureView,\n data: List[\n Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]\n ],\n progress: Optional[Callable[[int], Any]],\n ) -> None:\n online_config = config.online_store\n assert isinstance(online_config, DynamoDBOnlineStoreConfig)\n dynamodb_resource = self._get_dynamodb_resource(online_config.region)\n\n table_instance = dynamodb_resource.Table(_get_table_name(config, table))\n with table_instance.batch_writer() as batch:\n for entity_key, features, timestamp, created_ts in data:\n entity_id = compute_entity_id(entity_key)\n batch.put_item(\n Item={\n \"entity_id\": entity_id, # PartitionKey\n \"event_ts\": str(utils.make_tzaware(timestamp)),\n \"values\": {\n k: v.SerializeToString()\n for k, v in features.items() # Serialized Features\n },\n }\n )\n if progress:\n progress(1)\n\n @log_exceptions_and_usage(online_store=\"dynamodb\")\n def online_read(\n self,\n config: RepoConfig,\n table: FeatureView,\n entity_keys: List[EntityKeyProto],\n requested_features: Optional[List[str]] = None,\n ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:\n online_config = config.online_store\n assert isinstance(online_config, DynamoDBOnlineStoreConfig)\n dynamodb_resource = self._get_dynamodb_resource(online_config.region)\n\n result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []\n for entity_key in entity_keys:\n table_instance = dynamodb_resource.Table(_get_table_name(config, table))\n entity_id = compute_entity_id(entity_key)\n with tracing_span(name=\"remote_call\"):\n response = table_instance.get_item(Key={\"entity_id\": entity_id})\n value = response.get(\"Item\")\n\n if value is not None:\n res = {}\n for feature_name, value_bin in value[\"values\"].items():\n val = ValueProto()\n val.ParseFromString(value_bin.value)\n res[feature_name] = val\n result.append((datetime.fromisoformat(value[\"event_ts\"]), res))\n else:\n result.append((None, None))\n return result\n\n def _get_dynamodb_client(self, region: str):\n if self._dynamodb_client is None:\n self._dynamodb_client = _initialize_dynamodb_client(region)\n return self._dynamodb_client\n\n def _get_dynamodb_resource(self, region: str):\n if self._dynamodb_resource is None:\n self._dynamodb_resource = _initialize_dynamodb_resource(region)\n return self._dynamodb_resource\n\n\ndef _initialize_dynamodb_client(region: str):\n return boto3.client(\"dynamodb\", region_name=region)\n\n\ndef _initialize_dynamodb_resource(region: str):\n return boto3.resource(\"dynamodb\", region_name=region)\n\n\ndef _get_table_name(config: RepoConfig, table: FeatureView) -> str:\n return f\"{config.project}.{table.name}\"\n\n\ndef 
_delete_table_idempotent(\n dynamodb_resource, table_name: str,\n):\n try:\n table = dynamodb_resource.Table(table_name)\n table.delete()\n logger.info(f\"Dynamo table {table_name} was deleted\")\n except ClientError as ce:\n # If the table deletion fails with ResourceNotFoundException,\n # it means the table has already been deleted.\n # Otherwise, re-raise the exception\n if ce.response[\"Error\"][\"Code\"] != \"ResourceNotFoundException\":\n raise\n else:\n logger.warning(f\"Trying to delete table that doesn't exist: {table_name}\")\n\n\nclass DynamoDBTable(InfraObject):\n \"\"\"\n A DynamoDB table managed by Feast.\n\n Attributes:\n name: The name of the table.\n region: The region of the table.\n \"\"\"\n\n region: str\n\n def __init__(self, name: str, region: str):\n super().__init__(name)\n self.region = region\n\n def to_infra_object_proto(self) -> InfraObjectProto:\n dynamodb_table_proto = self.to_proto()\n return InfraObjectProto(\n infra_object_class_type=DYNAMODB_INFRA_OBJECT_CLASS_TYPE,\n dynamodb_table=dynamodb_table_proto,\n )\n\n def to_proto(self) -> Any:\n dynamodb_table_proto = DynamoDBTableProto()\n dynamodb_table_proto.name = self.name\n dynamodb_table_proto.region = self.region\n return dynamodb_table_proto\n\n @staticmethod\n def from_infra_object_proto(infra_object_proto: InfraObjectProto) -> Any:\n return DynamoDBTable(\n name=infra_object_proto.dynamodb_table.name,\n region=infra_object_proto.dynamodb_table.region,\n )\n\n @staticmethod\n def from_proto(dynamodb_table_proto: DynamoDBTableProto) -> Any:\n return DynamoDBTable(\n name=dynamodb_table_proto.name, region=dynamodb_table_proto.region,\n )\n\n def update(self):\n dynamodb_client = _initialize_dynamodb_client(region=self.region)\n dynamodb_resource = _initialize_dynamodb_resource(region=self.region)\n\n try:\n dynamodb_resource.create_table(\n TableName=f\"{self.name}\",\n KeySchema=[{\"AttributeName\": \"entity_id\", \"KeyType\": \"HASH\"}],\n AttributeDefinitions=[\n {\"AttributeName\": \"entity_id\", \"AttributeType\": \"S\"}\n ],\n BillingMode=\"PAY_PER_REQUEST\",\n )\n except ClientError as ce:\n # If the table creation fails with ResourceInUseException,\n # it means the table already exists or is being created.\n # Otherwise, re-raise the exception\n if ce.response[\"Error\"][\"Code\"] != \"ResourceInUseException\":\n raise\n\n dynamodb_client.get_waiter(\"table_exists\").wait(TableName=f\"{self.name}\")\n\n def teardown(self):\n dynamodb_resource = _initialize_dynamodb_resource(region=self.region)\n _delete_table_idempotent(dynamodb_resource, self.name)\n", "path": "sdk/python/feast/infra/online_stores/dynamodb.py"}]} | 3,669 | 829 |
gh_patches_debug_63214 | rasdani/github-patches | git_diff | ManimCommunity__manim-3108 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The documentation for the `--resolution` flag in the cli is wrong
The current documentation of the `--resolution` flag says the format is `(W,H)`, which is confusing because the passed value needs to be of the form `"W,H"`, so the documentation should be updated accordingly such that it reflects the usage `-r "W,H"`, ideally with an example of `-r "1920,1080"`.
</issue>
<code>
[start of manim/cli/render/render_options.py]
1 from __future__ import annotations
2
3 import re
4
5 import click
6 from cloup import option, option_group
7
8 from manim.constants import QUALITIES, RendererType
9
10 from ... import logger
11
12
13 def validate_scene_range(ctx, param, value):
14 try:
15 start = int(value)
16 return (start,)
17 except Exception:
18 pass
19
20 if value:
21 try:
22 start, end = map(int, re.split(r"[;,\-]", value))
23 return start, end
24 except Exception:
25 logger.error("Couldn't determine a range for -n option.")
26 exit()
27
28
29 def validate_resolution(ctx, param, value):
30 if value:
31 try:
32 start, end = map(int, re.split(r"[;,\-]", value))
33 return (start, end)
34 except Exception:
35 logger.error("Resolution option is invalid.")
36 exit()
37
38
39 render_options = option_group(
40 "Render Options",
41 option(
42 "-n",
43 "--from_animation_number",
44 callback=validate_scene_range,
45 help="Start rendering from n_0 until n_1. If n_1 is left unspecified, "
46 "renders all scenes after n_0.",
47 default=None,
48 ),
49 option(
50 "-a",
51 "--write_all",
52 is_flag=True,
53 help="Render all scenes in the input file.",
54 default=None,
55 ),
56 option(
57 "--format",
58 type=click.Choice(["png", "gif", "mp4", "webm", "mov"], case_sensitive=False),
59 default=None,
60 ),
61 option("-s", "--save_last_frame", is_flag=True, default=None),
62 option(
63 "-q",
64 "--quality",
65 default=None,
66 type=click.Choice(
67 list(reversed([q["flag"] for q in QUALITIES.values() if q["flag"]])), # type: ignore
68 case_sensitive=False,
69 ),
70 help="Render quality at the follow resolution framerates, respectively: "
71 + ", ".join(
72 reversed(
73 [
74 f'{q["pixel_width"]}x{q["pixel_height"]} {q["frame_rate"]}FPS'
75 for q in QUALITIES.values()
76 if q["flag"]
77 ]
78 )
79 ),
80 ),
81 option(
82 "-r",
83 "--resolution",
84 callback=validate_resolution,
85 default=None,
86 help="Resolution in (W,H) for when 16:9 aspect ratio isn't possible.",
87 ),
88 option(
89 "--fps",
90 "--frame_rate",
91 "frame_rate",
92 type=float,
93 default=None,
94 help="Render at this frame rate.",
95 ),
96 option(
97 "--renderer",
98 type=click.Choice(
99 [renderer_type.value for renderer_type in RendererType],
100 case_sensitive=False,
101 ),
102 help="Select a renderer for your Scene.",
103 default="cairo",
104 ),
105 option(
106 "-g",
107 "--save_pngs",
108 is_flag=True,
109 default=None,
110 help="Save each frame as png (Deprecated).",
111 ),
112 option(
113 "-i",
114 "--save_as_gif",
115 default=None,
116 is_flag=True,
117 help="Save as a gif (Deprecated).",
118 ),
119 option(
120 "--save_sections",
121 default=None,
122 is_flag=True,
123 help="Save section videos in addition to movie file.",
124 ),
125 option(
126 "-s",
127 "--save_last_frame",
128 default=None,
129 is_flag=True,
130 help="Save last frame as png (Deprecated).",
131 ),
132 option(
133 "-t",
134 "--transparent",
135 is_flag=True,
136 help="Render scenes with alpha channel.",
137 ),
138 option(
139 "--use_projection_fill_shaders",
140 is_flag=True,
141 help="Use shaders for OpenGLVMobject fill which are compatible with transformation matrices.",
142 default=None,
143 ),
144 option(
145 "--use_projection_stroke_shaders",
146 is_flag=True,
147 help="Use shaders for OpenGLVMobject stroke which are compatible with transformation matrices.",
148 default=None,
149 ),
150 )
151
[end of manim/cli/render/render_options.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/manim/cli/render/render_options.py b/manim/cli/render/render_options.py
--- a/manim/cli/render/render_options.py
+++ b/manim/cli/render/render_options.py
@@ -83,7 +83,7 @@
"--resolution",
callback=validate_resolution,
default=None,
- help="Resolution in (W,H) for when 16:9 aspect ratio isn't possible.",
+ help='Resolution in "W,H" for when 16:9 aspect ratio isn\'t possible.',
),
option(
"--fps",
| {"golden_diff": "diff --git a/manim/cli/render/render_options.py b/manim/cli/render/render_options.py\n--- a/manim/cli/render/render_options.py\n+++ b/manim/cli/render/render_options.py\n@@ -83,7 +83,7 @@\n \"--resolution\",\n callback=validate_resolution,\n default=None,\n- help=\"Resolution in (W,H) for when 16:9 aspect ratio isn't possible.\",\n+ help='Resolution in \"W,H\" for when 16:9 aspect ratio isn\\'t possible.',\n ),\n option(\n \"--fps\",\n", "issue": "The documentation for the `--resolution` flag in the cli is wrong\nThe current documentation of the `--resolution` flag says the format is `(W,H)` which is confusing because the passed value needs to be of the form `\"W,H\"`so the documentation should be updatet accordingly such that it reflects the usage `-r \"W,H\"` best with an example of `-r \"1920,1080\"`\n", "before_files": [{"content": "from __future__ import annotations\n\nimport re\n\nimport click\nfrom cloup import option, option_group\n\nfrom manim.constants import QUALITIES, RendererType\n\nfrom ... import logger\n\n\ndef validate_scene_range(ctx, param, value):\n try:\n start = int(value)\n return (start,)\n except Exception:\n pass\n\n if value:\n try:\n start, end = map(int, re.split(r\"[;,\\-]\", value))\n return start, end\n except Exception:\n logger.error(\"Couldn't determine a range for -n option.\")\n exit()\n\n\ndef validate_resolution(ctx, param, value):\n if value:\n try:\n start, end = map(int, re.split(r\"[;,\\-]\", value))\n return (start, end)\n except Exception:\n logger.error(\"Resolution option is invalid.\")\n exit()\n\n\nrender_options = option_group(\n \"Render Options\",\n option(\n \"-n\",\n \"--from_animation_number\",\n callback=validate_scene_range,\n help=\"Start rendering from n_0 until n_1. If n_1 is left unspecified, \"\n \"renders all scenes after n_0.\",\n default=None,\n ),\n option(\n \"-a\",\n \"--write_all\",\n is_flag=True,\n help=\"Render all scenes in the input file.\",\n default=None,\n ),\n option(\n \"--format\",\n type=click.Choice([\"png\", \"gif\", \"mp4\", \"webm\", \"mov\"], case_sensitive=False),\n default=None,\n ),\n option(\"-s\", \"--save_last_frame\", is_flag=True, default=None),\n option(\n \"-q\",\n \"--quality\",\n default=None,\n type=click.Choice(\n list(reversed([q[\"flag\"] for q in QUALITIES.values() if q[\"flag\"]])), # type: ignore\n case_sensitive=False,\n ),\n help=\"Render quality at the follow resolution framerates, respectively: \"\n + \", \".join(\n reversed(\n [\n f'{q[\"pixel_width\"]}x{q[\"pixel_height\"]} {q[\"frame_rate\"]}FPS'\n for q in QUALITIES.values()\n if q[\"flag\"]\n ]\n )\n ),\n ),\n option(\n \"-r\",\n \"--resolution\",\n callback=validate_resolution,\n default=None,\n help=\"Resolution in (W,H) for when 16:9 aspect ratio isn't possible.\",\n ),\n option(\n \"--fps\",\n \"--frame_rate\",\n \"frame_rate\",\n type=float,\n default=None,\n help=\"Render at this frame rate.\",\n ),\n option(\n \"--renderer\",\n type=click.Choice(\n [renderer_type.value for renderer_type in RendererType],\n case_sensitive=False,\n ),\n help=\"Select a renderer for your Scene.\",\n default=\"cairo\",\n ),\n option(\n \"-g\",\n \"--save_pngs\",\n is_flag=True,\n default=None,\n help=\"Save each frame as png (Deprecated).\",\n ),\n option(\n \"-i\",\n \"--save_as_gif\",\n default=None,\n is_flag=True,\n help=\"Save as a gif (Deprecated).\",\n ),\n option(\n \"--save_sections\",\n default=None,\n is_flag=True,\n help=\"Save section videos in addition to movie file.\",\n ),\n option(\n \"-s\",\n 
\"--save_last_frame\",\n default=None,\n is_flag=True,\n help=\"Save last frame as png (Deprecated).\",\n ),\n option(\n \"-t\",\n \"--transparent\",\n is_flag=True,\n help=\"Render scenes with alpha channel.\",\n ),\n option(\n \"--use_projection_fill_shaders\",\n is_flag=True,\n help=\"Use shaders for OpenGLVMobject fill which are compatible with transformation matrices.\",\n default=None,\n ),\n option(\n \"--use_projection_stroke_shaders\",\n is_flag=True,\n help=\"Use shaders for OpenGLVMobject stroke which are compatible with transformation matrices.\",\n default=None,\n ),\n)\n", "path": "manim/cli/render/render_options.py"}]} | 1,829 | 122 |
gh_patches_debug_15376 | rasdani/github-patches | git_diff | beeware__toga-1850 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hide webview black background when resizing
### Describe the bug
By default on Windows, a webview renders a black background behind it. When resizing the webview, this black background becomes visible for a couple of frames while the HTML rendered on top is still updating, resulting in black "bars" appearing at the sides of the webview while it is resized.
This can look quite ugly
### Steps to reproduce
see: https://youtu.be/Xj1rW4L-Pqk
```py
"""
My first application
"""
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
class HelloWorld(toga.App):

    def startup(self):
        """
        Construct and show the Toga application.

        Usually, you would add your application to a main content box.
        We then create a main window (with a name matching the app), and
        show the main window.
        """
        web = toga.WebView(url='https://google.com')
        self.main_window = toga.MainWindow(title=self.formal_name)
        self.main_window.content = web
        self.main_window.show()


def main():
    helloWorld = HelloWorld("foobar",
                            "org.foo.bar")
    return helloWorld


main().main_loop()
```
### Environment
- Operating System:
  - Windows 11, winforms
- Python version:
  - 3.10
- Software versions:
  - Briefcase:
    - 0.3.12
  - Toga:
    - 0.3.0
</issue>
<code>
[start of winforms/src/toga_winforms/widgets/webview.py]
1 import traceback
2 import webbrowser
3 from asyncio import get_event_loop
4
5 from travertino.size import at_least
6
7 import toga
8 from toga_winforms.keys import toga_key
9 from toga_winforms.libs import (
10 Action,
11 CoreWebView2CreationProperties,
12 String,
13 Task,
14 TaskScheduler,
15 Uri,
16 WebView2,
17 WebView2RuntimeNotFoundException,
18 WinForms,
19 )
20
21 from .base import Widget
22
23
24 class TogaWebBrowser(WebView2):
25 def __init__(self, interface):
26 super().__init__()
27 self.interface = interface
28 self._edge_runtime_available = None # Set to an unknown state initially
29
30
31 class WebView(Widget):
32 def create(self):
33 self.native = TogaWebBrowser(self.interface)
34 self.native.CoreWebView2InitializationCompleted += (
35 self.winforms_initialization_completed
36 )
37 self.native.NavigationCompleted += self.winforms_navigation_completed
38 self.native.KeyDown += self.winforms_key_down
39
40 props = CoreWebView2CreationProperties()
41 props.UserDataFolder = str(toga.App.app.paths.cache / "WebView2")
42 self.native.CreationProperties = props
43
44 # Trigger the configuration of the webview
45 self.native.EnsureCoreWebView2Async(None)
46
47 def winforms_initialization_completed(self, sender, args):
48 # The WebView2 widget has an "internal" widget (CoreWebView2) that is
49 # the actual web view. The view isn't ready until the internal widget has
50 # completed initialization, and that isn't done until an explicit
51 # request is made (EnsureCoreWebView2Async).
52 if args.IsSuccess:
53 # We've initialized, so we must have the runtime
54 self.native._edge_runtime_available = True
55 try:
56 settings = self.native.CoreWebView2.Settings
57
58 debug = True
59 settings.AreDefaultContextMenusEnabled = debug
60 settings.AreDefaultScriptDialogsEnabled = True
61 settings.AreDevToolsEnabled = debug
62 settings.IsBuiltInErrorPageEnabled = True
63 settings.IsScriptEnabled = True
64 settings.IsWebMessageEnabled = True
65 settings.IsStatusBarEnabled = debug
66 settings.IsZoomControlEnabled = True
67
68 self.set_user_agent(self.interface.user_agent)
69
70 if self.interface._html_content:
71 self.set_content(self.interface.url, self.interface._html_content)
72 else:
73 self.set_url(self.interface.url)
74
75 except Exception:
76 traceback.print_exc()
77 else:
78 if isinstance(
79 args.InitializationException, WebView2RuntimeNotFoundException
80 ):
81 print("Could not find the Microsoft Edge WebView2 Runtime.")
82 if self.native._edge_runtime_available is None:
83 # The initialize message is sent twice on failure.
84 # We only want to show the dialog once, so track that we
85 # know the runtime is missing.
86 self.native._edge_runtime_available = False
87 WinForms.MessageBox.Show(
88 "The Microsoft Edge WebView2 Runtime is not installed. "
89 "Web content will not be displayed.\n\n"
90 "Click OK to download the WebView2 Evergreen Runtime "
91 "Bootstrapper from Microsoft.",
92 "Missing Edge Webview2 runtime",
93 WinForms.MessageBoxButtons.OK,
94 WinForms.MessageBoxIcon.Error,
95 )
96 webbrowser.open(
97 "https://developer.microsoft.com/en-us/microsoft-edge/webview2/#download-section"
98 )
99 else:
100 print(args.InitializationException)
101
102 def winforms_navigation_completed(self, sender, args):
103 if self.interface.on_webview_load:
104 self.interface.on_webview_load(self.interface)
105
106 def winforms_key_down(self, sender, args):
107 if self.interface.on_key_down:
108 self.interface.on_key_down(self.interface, **toga_key(args))
109
110 def set_on_key_down(self, handler):
111 pass
112
113 def set_on_webview_load(self, handler):
114 pass
115
116 def get_url(self):
117 return str(self.native.Source)
118
119 def set_url(self, value):
120 if value:
121 self.native.Source = Uri(value)
122
123 def set_content(self, root_url, content):
124 if content and self.native.CoreWebView2:
125 self.native.CoreWebView2.NavigateToString(content)
126
127 def get_dom(self):
128 self.interface.factory.not_implemented("WebView.get_dom()")
129
130 def set_user_agent(self, value):
131 user_agent = (
132 value
133 if value
134 else (
135 "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
136 "(KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36 Edg/90.0.818.46"
137 )
138 )
139 if self.native.CoreWebView2:
140 self.native.CoreWebView2.Settings.UserAgent = user_agent
141
142 async def evaluate_javascript(self, javascript):
143 loop = get_event_loop()
144 future = loop.create_future()
145
146 task_scheduler = TaskScheduler.FromCurrentSynchronizationContext()
147 try:
148
149 def callback(task):
150 future.set_result(task.Result)
151
152 self.native.ExecuteScriptAsync(javascript).ContinueWith(
153 Action[Task[String]](callback), task_scheduler
154 )
155 except Exception:
156 traceback.print_exc()
157 future.set_result(None)
158
159 return await future
160
161 def invoke_javascript(self, javascript):
162 # The script will execute async, but you weren't going to get the result
163 # anyway, so it doesn't really matter.
164 self.native.ExecuteScriptAsync(javascript)
165
166 def rehint(self):
167 self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)
168 self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)
169
[end of winforms/src/toga_winforms/widgets/webview.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/winforms/src/toga_winforms/widgets/webview.py b/winforms/src/toga_winforms/widgets/webview.py
--- a/winforms/src/toga_winforms/widgets/webview.py
+++ b/winforms/src/toga_winforms/widgets/webview.py
@@ -8,6 +8,7 @@
from toga_winforms.keys import toga_key
from toga_winforms.libs import (
Action,
+ Color,
CoreWebView2CreationProperties,
String,
Task,
@@ -43,6 +44,7 @@
# Trigger the configuration of the webview
self.native.EnsureCoreWebView2Async(None)
+ self.native.DefaultBackgroundColor = Color.Transparent
def winforms_initialization_completed(self, sender, args):
# The WebView2 widget has an "internal" widget (CoreWebView2) that is
| {"golden_diff": "diff --git a/winforms/src/toga_winforms/widgets/webview.py b/winforms/src/toga_winforms/widgets/webview.py\n--- a/winforms/src/toga_winforms/widgets/webview.py\n+++ b/winforms/src/toga_winforms/widgets/webview.py\n@@ -8,6 +8,7 @@\n from toga_winforms.keys import toga_key\n from toga_winforms.libs import (\n Action,\n+ Color,\n CoreWebView2CreationProperties,\n String,\n Task,\n@@ -43,6 +44,7 @@\n \n # Trigger the configuration of the webview\n self.native.EnsureCoreWebView2Async(None)\n+ self.native.DefaultBackgroundColor = Color.Transparent\n \n def winforms_initialization_completed(self, sender, args):\n # The WebView2 widget has an \"internal\" widget (CoreWebView2) that is\n", "issue": "Hide webview black background when resizing\n### Describe the bug\r\n\r\nBy default in Windows, a webview will render a black background behind it. When resizing the webview, this black background becomes visible for a couple of frames while the html rendered on top is still in the process of updating, resulting in black \"bars\" appearing at the side of the webview when resizing it.\r\n\r\nThis can look quite ugly\r\n\r\n### Steps to reproduce\r\n\r\nsee: https://youtu.be/Xj1rW4L-Pqk\r\n\r\n```py\r\n\"\"\"\r\nMy first application\r\n\"\"\"\r\nimport toga\r\nfrom toga.style import Pack\r\nfrom toga.style.pack import COLUMN, ROW\r\n\r\n\r\nclass HelloWorld(toga.App):\r\n\r\n def startup(self):\r\n \"\"\"\r\n Construct and show the Toga application.\r\n\r\n Usually, you would add your application to a main content box.\r\n We then create a main window (with a name matching the app), and\r\n show the main window.\r\n \"\"\"\r\n web = toga.WebView(url='https://google.com')\r\n self.main_window = toga.MainWindow(title=self.formal_name)\r\n self.main_window.content = web\r\n self.main_window.show()\r\n\r\n\r\n\r\n\r\ndef main():\r\n helloWorld = HelloWorld(\"foobar\",\r\n \"org.foo.bar\")\r\n return helloWorld\r\n\r\n\r\nmain().main_loop()\r\n```\r\n\r\n### Environment\r\n\r\n- Operating System:\r\n- Windows 11, winforms\r\n- Python version:\r\n- 3.10\r\n- Software versions:\r\n - Briefcase:\r\n - 0.3.12\r\n - Toga:\r\n - 0.3.0\r\n\r\n\n", "before_files": [{"content": "import traceback\nimport webbrowser\nfrom asyncio import get_event_loop\n\nfrom travertino.size import at_least\n\nimport toga\nfrom toga_winforms.keys import toga_key\nfrom toga_winforms.libs import (\n Action,\n CoreWebView2CreationProperties,\n String,\n Task,\n TaskScheduler,\n Uri,\n WebView2,\n WebView2RuntimeNotFoundException,\n WinForms,\n)\n\nfrom .base import Widget\n\n\nclass TogaWebBrowser(WebView2):\n def __init__(self, interface):\n super().__init__()\n self.interface = interface\n self._edge_runtime_available = None # Set to an unknown state initially\n\n\nclass WebView(Widget):\n def create(self):\n self.native = TogaWebBrowser(self.interface)\n self.native.CoreWebView2InitializationCompleted += (\n self.winforms_initialization_completed\n )\n self.native.NavigationCompleted += self.winforms_navigation_completed\n self.native.KeyDown += self.winforms_key_down\n\n props = CoreWebView2CreationProperties()\n props.UserDataFolder = str(toga.App.app.paths.cache / \"WebView2\")\n self.native.CreationProperties = props\n\n # Trigger the configuration of the webview\n self.native.EnsureCoreWebView2Async(None)\n\n def winforms_initialization_completed(self, sender, args):\n # The WebView2 widget has an \"internal\" widget (CoreWebView2) that is\n # the actual web view. 
The view isn't ready until the internal widget has\n # completed initialization, and that isn't done until an explicit\n # request is made (EnsureCoreWebView2Async).\n if args.IsSuccess:\n # We've initialized, so we must have the runtime\n self.native._edge_runtime_available = True\n try:\n settings = self.native.CoreWebView2.Settings\n\n debug = True\n settings.AreDefaultContextMenusEnabled = debug\n settings.AreDefaultScriptDialogsEnabled = True\n settings.AreDevToolsEnabled = debug\n settings.IsBuiltInErrorPageEnabled = True\n settings.IsScriptEnabled = True\n settings.IsWebMessageEnabled = True\n settings.IsStatusBarEnabled = debug\n settings.IsZoomControlEnabled = True\n\n self.set_user_agent(self.interface.user_agent)\n\n if self.interface._html_content:\n self.set_content(self.interface.url, self.interface._html_content)\n else:\n self.set_url(self.interface.url)\n\n except Exception:\n traceback.print_exc()\n else:\n if isinstance(\n args.InitializationException, WebView2RuntimeNotFoundException\n ):\n print(\"Could not find the Microsoft Edge WebView2 Runtime.\")\n if self.native._edge_runtime_available is None:\n # The initialize message is sent twice on failure.\n # We only want to show the dialog once, so track that we\n # know the runtime is missing.\n self.native._edge_runtime_available = False\n WinForms.MessageBox.Show(\n \"The Microsoft Edge WebView2 Runtime is not installed. \"\n \"Web content will not be displayed.\\n\\n\"\n \"Click OK to download the WebView2 Evergreen Runtime \"\n \"Bootstrapper from Microsoft.\",\n \"Missing Edge Webview2 runtime\",\n WinForms.MessageBoxButtons.OK,\n WinForms.MessageBoxIcon.Error,\n )\n webbrowser.open(\n \"https://developer.microsoft.com/en-us/microsoft-edge/webview2/#download-section\"\n )\n else:\n print(args.InitializationException)\n\n def winforms_navigation_completed(self, sender, args):\n if self.interface.on_webview_load:\n self.interface.on_webview_load(self.interface)\n\n def winforms_key_down(self, sender, args):\n if self.interface.on_key_down:\n self.interface.on_key_down(self.interface, **toga_key(args))\n\n def set_on_key_down(self, handler):\n pass\n\n def set_on_webview_load(self, handler):\n pass\n\n def get_url(self):\n return str(self.native.Source)\n\n def set_url(self, value):\n if value:\n self.native.Source = Uri(value)\n\n def set_content(self, root_url, content):\n if content and self.native.CoreWebView2:\n self.native.CoreWebView2.NavigateToString(content)\n\n def get_dom(self):\n self.interface.factory.not_implemented(\"WebView.get_dom()\")\n\n def set_user_agent(self, value):\n user_agent = (\n value\n if value\n else (\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \"\n \"(KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36 Edg/90.0.818.46\"\n )\n )\n if self.native.CoreWebView2:\n self.native.CoreWebView2.Settings.UserAgent = user_agent\n\n async def evaluate_javascript(self, javascript):\n loop = get_event_loop()\n future = loop.create_future()\n\n task_scheduler = TaskScheduler.FromCurrentSynchronizationContext()\n try:\n\n def callback(task):\n future.set_result(task.Result)\n\n self.native.ExecuteScriptAsync(javascript).ContinueWith(\n Action[Task[String]](callback), task_scheduler\n )\n except Exception:\n traceback.print_exc()\n future.set_result(None)\n\n return await future\n\n def invoke_javascript(self, javascript):\n # The script will execute async, but you weren't going to get the result\n # anyway, so it doesn't really matter.\n 
self.native.ExecuteScriptAsync(javascript)\n\n def rehint(self):\n self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)\n self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)\n", "path": "winforms/src/toga_winforms/widgets/webview.py"}]} | 2,495 | 182 |
gh_patches_debug_39951 | rasdani/github-patches | git_diff | liqd__a4-opin-346 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Choose template: Small issues
There are some small wording issues when you choose a template to create a project in the dashboard. See comments in screenshot.

</issue>
<code>
[start of euth/dashboard/templatetags/dashboard_templatetags.py]
1 from django import template
2
3 register = template.Library()
4
5
6 @register.simple_tag
7 def selected(request, pattern):
8 path = request.path
9 if path == pattern:
10 return 'selected'
11 return ''
12
[end of euth/dashboard/templatetags/dashboard_templatetags.py]
[start of euth/dashboard/urls.py]
1 from django.conf.urls import url
2
3 from . import views
4
5 urlpatterns = [
6 url(
7 r'^$',
8 views.dashboard,
9 name='dashboard'),
10 url(
11 r'^profile$',
12 views.DashboardProfileView.as_view(),
13 name='dashboard-profile'),
14 url(
15 r'^email/$',
16 views.DashboardEmailView.as_view(),
17 name='dashboard-email'
18 ),
19 url(
20 r'^connections/$',
21 views.DashboardAccountView.as_view(),
22 name='dashboard-connections'
23 ),
24 url(
25 r'^(?P<organisation_slug>[-\w_]+)/$',
26 views.DashboardOrganisationUpdateView.as_view(),
27 name='dashboard-organisation-edit'
28 ),
29 url(
30 r'^(?P<organisation_slug>[-\w_]+)/projects/$',
31 views.DashboardProjectListView.as_view(),
32 name='dashboard-project-list'),
33 url(r'^(?P<organisation_slug>[-\w_]+)/blueprints/$',
34 views.DashboardBlueprintListView.as_view(),
35 name='dashboard-blueprint-list'),
36 url(r'^(?P<organisation_slug>[-\w_]+)/blueprints/'
37 r'(?P<blueprint_slug>[-\w_]+)/$',
38 views.DashboardProjectCreateView.as_view(),
39 name='dashboard-project-create'),
40 url(
41 r'^(?P<organisation_slug>[-\w_]+)/projects/(?P<slug>[-\w_]+)/$',
42 views.DashboardProjectUpdateView.as_view(),
43 name='dashboard-project-edit'
44 ),
45 url(
46 r'^(?P<organisation_slug>[-\w_]+)/projects/(?P<slug>[-\w_]+)/users$',
47 views.DashboardProjectUserView.as_view(),
48 name='dashboard-project-users'
49 ),
50 url(
51 r'^(?P<organisation_slug>[-\w_]+)/projects/'
52 r'(?P<slug>[-\w_]+)/users/invite$',
53 views.DashboardProjectInviteView.as_view(),
54 name='dashboard-project-invite'
55 ),
56 ]
57
[end of euth/dashboard/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/euth/dashboard/templatetags/dashboard_templatetags.py b/euth/dashboard/templatetags/dashboard_templatetags.py
deleted file mode 100644
--- a/euth/dashboard/templatetags/dashboard_templatetags.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from django import template
-
-register = template.Library()
-
-
[email protected]_tag
-def selected(request, pattern):
- path = request.path
- if path == pattern:
- return 'selected'
- return ''
diff --git a/euth/dashboard/urls.py b/euth/dashboard/urls.py
--- a/euth/dashboard/urls.py
+++ b/euth/dashboard/urls.py
@@ -10,47 +10,57 @@
url(
r'^profile$',
views.DashboardProfileView.as_view(),
+ {'dashboard_menu_item': 'profile'},
name='dashboard-profile'),
url(
r'^email/$',
views.DashboardEmailView.as_view(),
+ {'dashboard_menu_item': 'email'},
name='dashboard-email'
),
url(
r'^connections/$',
views.DashboardAccountView.as_view(),
+ {'dashboard_menu_item': 'connections'},
name='dashboard-connections'
),
url(
r'^(?P<organisation_slug>[-\w_]+)/$',
views.DashboardOrganisationUpdateView.as_view(),
+ {'dashboard_menu_item': 'organisation'},
name='dashboard-organisation-edit'
),
url(
r'^(?P<organisation_slug>[-\w_]+)/projects/$',
views.DashboardProjectListView.as_view(),
+ {'dashboard_menu_item': 'project'},
name='dashboard-project-list'),
url(r'^(?P<organisation_slug>[-\w_]+)/blueprints/$',
views.DashboardBlueprintListView.as_view(),
+ {'dashboard_menu_item': 'project'},
name='dashboard-blueprint-list'),
url(r'^(?P<organisation_slug>[-\w_]+)/blueprints/'
r'(?P<blueprint_slug>[-\w_]+)/$',
views.DashboardProjectCreateView.as_view(),
+ {'dashboard_menu_item': 'project'},
name='dashboard-project-create'),
url(
r'^(?P<organisation_slug>[-\w_]+)/projects/(?P<slug>[-\w_]+)/$',
views.DashboardProjectUpdateView.as_view(),
+ {'dashboard_menu_item': 'project'},
name='dashboard-project-edit'
),
url(
r'^(?P<organisation_slug>[-\w_]+)/projects/(?P<slug>[-\w_]+)/users$',
views.DashboardProjectUserView.as_view(),
+ {'dashboard_menu_item': 'project'},
name='dashboard-project-users'
),
url(
r'^(?P<organisation_slug>[-\w_]+)/projects/'
r'(?P<slug>[-\w_]+)/users/invite$',
views.DashboardProjectInviteView.as_view(),
+ {'dashboard_menu_item': 'project'},
name='dashboard-project-invite'
),
]
| {"golden_diff": "diff --git a/euth/dashboard/templatetags/dashboard_templatetags.py b/euth/dashboard/templatetags/dashboard_templatetags.py\ndeleted file mode 100644\n--- a/euth/dashboard/templatetags/dashboard_templatetags.py\n+++ /dev/null\n@@ -1,11 +0,0 @@\n-from django import template\n-\n-register = template.Library()\n-\n-\[email protected]_tag\n-def selected(request, pattern):\n- path = request.path\n- if path == pattern:\n- return 'selected'\n- return ''\ndiff --git a/euth/dashboard/urls.py b/euth/dashboard/urls.py\n--- a/euth/dashboard/urls.py\n+++ b/euth/dashboard/urls.py\n@@ -10,47 +10,57 @@\n url(\n r'^profile$',\n views.DashboardProfileView.as_view(),\n+ {'dashboard_menu_item': 'profile'},\n name='dashboard-profile'),\n url(\n r'^email/$',\n views.DashboardEmailView.as_view(),\n+ {'dashboard_menu_item': 'email'},\n name='dashboard-email'\n ),\n url(\n r'^connections/$',\n views.DashboardAccountView.as_view(),\n+ {'dashboard_menu_item': 'connections'},\n name='dashboard-connections'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/$',\n views.DashboardOrganisationUpdateView.as_view(),\n+ {'dashboard_menu_item': 'organisation'},\n name='dashboard-organisation-edit'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/$',\n views.DashboardProjectListView.as_view(),\n+ {'dashboard_menu_item': 'project'},\n name='dashboard-project-list'),\n url(r'^(?P<organisation_slug>[-\\w_]+)/blueprints/$',\n views.DashboardBlueprintListView.as_view(),\n+ {'dashboard_menu_item': 'project'},\n name='dashboard-blueprint-list'),\n url(r'^(?P<organisation_slug>[-\\w_]+)/blueprints/'\n r'(?P<blueprint_slug>[-\\w_]+)/$',\n views.DashboardProjectCreateView.as_view(),\n+ {'dashboard_menu_item': 'project'},\n name='dashboard-project-create'),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/(?P<slug>[-\\w_]+)/$',\n views.DashboardProjectUpdateView.as_view(),\n+ {'dashboard_menu_item': 'project'},\n name='dashboard-project-edit'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/(?P<slug>[-\\w_]+)/users$',\n views.DashboardProjectUserView.as_view(),\n+ {'dashboard_menu_item': 'project'},\n name='dashboard-project-users'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/'\n r'(?P<slug>[-\\w_]+)/users/invite$',\n views.DashboardProjectInviteView.as_view(),\n+ {'dashboard_menu_item': 'project'},\n name='dashboard-project-invite'\n ),\n ]\n", "issue": "Choose template: Small issues\nThere are some small wording issues when you choose a template to create a project in the dashboard. See comments in screenshot.\n\n\n\n", "before_files": [{"content": "from django import template\n\nregister = template.Library()\n\n\[email protected]_tag\ndef selected(request, pattern):\n path = request.path\n if path == pattern:\n return 'selected'\n return ''\n", "path": "euth/dashboard/templatetags/dashboard_templatetags.py"}, {"content": "from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(\n r'^$',\n views.dashboard,\n name='dashboard'),\n url(\n r'^profile$',\n views.DashboardProfileView.as_view(),\n name='dashboard-profile'),\n url(\n r'^email/$',\n views.DashboardEmailView.as_view(),\n name='dashboard-email'\n ),\n url(\n r'^connections/$',\n views.DashboardAccountView.as_view(),\n name='dashboard-connections'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/$',\n views.DashboardOrganisationUpdateView.as_view(),\n name='dashboard-organisation-edit'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/$',\n views.DashboardProjectListView.as_view(),\n name='dashboard-project-list'),\n url(r'^(?P<organisation_slug>[-\\w_]+)/blueprints/$',\n views.DashboardBlueprintListView.as_view(),\n name='dashboard-blueprint-list'),\n url(r'^(?P<organisation_slug>[-\\w_]+)/blueprints/'\n r'(?P<blueprint_slug>[-\\w_]+)/$',\n views.DashboardProjectCreateView.as_view(),\n name='dashboard-project-create'),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/(?P<slug>[-\\w_]+)/$',\n views.DashboardProjectUpdateView.as_view(),\n name='dashboard-project-edit'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/(?P<slug>[-\\w_]+)/users$',\n views.DashboardProjectUserView.as_view(),\n name='dashboard-project-users'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/'\n r'(?P<slug>[-\\w_]+)/users/invite$',\n views.DashboardProjectInviteView.as_view(),\n name='dashboard-project-invite'\n ),\n]\n", "path": "euth/dashboard/urls.py"}]} | 1,270 | 685 |
gh_patches_debug_5112 | rasdani/github-patches | git_diff | ray-project__ray-6233 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Kubernetes Docker Container untagged
The container used by the Kubernetes manifests (rayproject/autoscaler) doesn't have any useful tags. Furthermore, the contents of the container are actually built using github.com/edoakes/ray, and the container is two months old.
It might be better to properly tag the containers with versions and ensure they are built from the main repo (by Travis) so that there aren't small differences in the code and it stays up to date.
</issue>
<code>
[start of python/setup.py]
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 import glob
6 import os
7 import re
8 import shutil
9 import subprocess
10 import sys
11
12 from setuptools import setup, find_packages, Distribution
13 import setuptools.command.build_ext as _build_ext
14
15 # Ideally, we could include these files by putting them in a
16 # MANIFEST.in or using the package_data argument to setup, but the
17 # MANIFEST.in gets applied at the very beginning when setup.py runs
18 # before these files have been created, so we have to move the files
19 # manually.
20
21 # NOTE: The lists below must be kept in sync with ray/BUILD.bazel.
22
23 ray_files = [
24 "ray/core/src/ray/thirdparty/redis/src/redis-server",
25 "ray/core/src/ray/gcs/redis_module/libray_redis_module.so",
26 "ray/core/src/plasma/plasma_store_server",
27 "ray/_raylet.so",
28 "ray/core/src/ray/raylet/raylet_monitor",
29 "ray/core/src/ray/raylet/raylet",
30 "ray/dashboard/dashboard.py",
31 ]
32
33 # These are the directories where automatically generated Python protobuf
34 # bindings are created.
35 generated_python_directories = [
36 "ray/core/generated",
37 ]
38
39 optional_ray_files = []
40
41 ray_autoscaler_files = [
42 "ray/autoscaler/aws/example-full.yaml",
43 "ray/autoscaler/gcp/example-full.yaml",
44 "ray/autoscaler/local/example-full.yaml",
45 "ray/autoscaler/kubernetes/example-full.yaml",
46 "ray/autoscaler/kubernetes/kubectl-rsync.sh",
47 ]
48
49 ray_project_files = [
50 "ray/projects/schema.json", "ray/projects/templates/cluster_template.yaml",
51 "ray/projects/templates/project_template.yaml",
52 "ray/projects/templates/requirements.txt"
53 ]
54
55 ray_dashboard_files = [
56 "ray/dashboard/client/build/favicon.ico",
57 "ray/dashboard/client/build/index.html",
58 ]
59 for dirname in ["css", "js", "media"]:
60 ray_dashboard_files += glob.glob(
61 "ray/dashboard/client/build/static/{}/*".format(dirname))
62
63 optional_ray_files += ray_autoscaler_files
64 optional_ray_files += ray_project_files
65 optional_ray_files += ray_dashboard_files
66
67 if "RAY_USE_NEW_GCS" in os.environ and os.environ["RAY_USE_NEW_GCS"] == "on":
68 ray_files += [
69 "ray/core/src/credis/build/src/libmember.so",
70 "ray/core/src/credis/build/src/libmaster.so",
71 "ray/core/src/credis/redis/src/redis-server"
72 ]
73
74 extras = {
75 "rllib": [
76 "pyyaml", "gym[atari]", "opencv-python-headless", "lz4", "scipy",
77 "tabulate"
78 ],
79 "debug": ["psutil", "setproctitle", "py-spy >= 0.2.0"],
80 "dashboard": ["aiohttp", "google", "grpcio", "psutil", "setproctitle"],
81 "serve": ["uvicorn", "pygments", "werkzeug", "flask", "pandas"],
82 "tune": ["tabulate"],
83 }
84
85
86 class build_ext(_build_ext.build_ext):
87 def run(self):
88 # Note: We are passing in sys.executable so that we use the same
89 # version of Python to build pyarrow inside the build.sh script. Note
90 # that certain flags will not be passed along such as --user or sudo.
91 # TODO(rkn): Fix this.
92 command = ["../build.sh", "-p", sys.executable]
93 if os.getenv("RAY_INSTALL_JAVA") == "1":
94 # Also build binaries for Java if the above env variable exists.
95 command += ["-l", "python,java"]
96 subprocess.check_call(command)
97
98 # We also need to install pyarrow along with Ray, so make sure that the
99 # relevant non-Python pyarrow files get copied.
100 pyarrow_files = []
101 for (root, dirs, filenames) in os.walk("./ray/pyarrow_files/pyarrow"):
102 for name in filenames:
103 pyarrow_files.append(os.path.join(root, name))
104
105 # We also need to install pickle5 along with Ray, so make sure that the
106 # relevant non-Python pickle5 files get copied.
107 pickle5_files = []
108 for (root, dirs, filenames) in os.walk("./ray/pickle5_files/pickle5"):
109 for name in filenames:
110 pickle5_files.append(os.path.join(root, name))
111
112 files_to_include = ray_files + pyarrow_files + pickle5_files
113
114 # Copy over the autogenerated protobuf Python bindings.
115 for directory in generated_python_directories:
116 for filename in os.listdir(directory):
117 if filename[-3:] == ".py":
118 files_to_include.append(os.path.join(directory, filename))
119
120 for filename in files_to_include:
121 self.move_file(filename)
122
123 # Try to copy over the optional files.
124 for filename in optional_ray_files:
125 try:
126 self.move_file(filename)
127 except Exception:
128 print("Failed to copy optional file {}. This is ok."
129 .format(filename))
130
131 def move_file(self, filename):
132 # TODO(rkn): This feels very brittle. It may not handle all cases. See
133 # https://github.com/apache/arrow/blob/master/python/setup.py for an
134 # example.
135 source = filename
136 destination = os.path.join(self.build_lib, filename)
137 # Create the target directory if it doesn't already exist.
138 parent_directory = os.path.dirname(destination)
139 if not os.path.exists(parent_directory):
140 os.makedirs(parent_directory)
141 if not os.path.exists(destination):
142 print("Copying {} to {}.".format(source, destination))
143 shutil.copy(source, destination)
144
145
146 class BinaryDistribution(Distribution):
147 def has_ext_modules(self):
148 return True
149
150
151 def find_version(*filepath):
152 # Extract version information from filepath
153 here = os.path.abspath(os.path.dirname(__file__))
154 with open(os.path.join(here, *filepath)) as fp:
155 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
156 fp.read(), re.M)
157 if version_match:
158 return version_match.group(1)
159 raise RuntimeError("Unable to find version string.")
160
161
162 requires = [
163 "numpy >= 1.14",
164 "filelock",
165 "jsonschema",
166 "funcsigs",
167 "click",
168 "colorama",
169 "pytest",
170 "pyyaml",
171 "redis>=3.3.2",
172 # NOTE: Don't upgrade the version of six! Doing so causes installation
173 # problems. See https://github.com/ray-project/ray/issues/4169.
174 "six >= 1.0.0",
175 "faulthandler;python_version<'3.3'",
176 "protobuf >= 3.8.0",
177 ]
178
179 setup(
180 name="ray",
181 version=find_version("ray", "__init__.py"),
182 author="Ray Team",
183 author_email="[email protected]",
184 description=("A system for parallel and distributed Python that unifies "
185 "the ML ecosystem."),
186 long_description=open("../README.rst").read(),
187 url="https://github.com/ray-project/ray",
188 keywords=("ray distributed parallel machine-learning "
189 "reinforcement-learning deep-learning python"),
190 packages=find_packages(),
191 cmdclass={"build_ext": build_ext},
192 # The BinaryDistribution argument triggers build_ext.
193 distclass=BinaryDistribution,
194 install_requires=requires,
195 setup_requires=["cython >= 0.29"],
196 extras_require=extras,
197 entry_points={
198 "console_scripts": [
199 "ray=ray.scripts.scripts:main",
200 "rllib=ray.rllib.scripts:cli [rllib]", "tune=ray.tune.scripts:cli"
201 ]
202 },
203 include_package_data=True,
204 zip_safe=False,
205 license="Apache 2.0")
206
[end of python/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/setup.py b/python/setup.py
--- a/python/setup.py
+++ b/python/setup.py
@@ -3,6 +3,7 @@
from __future__ import print_function
import glob
+from itertools import chain
import os
import re
import shutil
@@ -82,6 +83,8 @@
"tune": ["tabulate"],
}
+extras["all"] = list(set(chain.from_iterable(extras.values())))
+
class build_ext(_build_ext.build_ext):
def run(self):
| {"golden_diff": "diff --git a/python/setup.py b/python/setup.py\n--- a/python/setup.py\n+++ b/python/setup.py\n@@ -3,6 +3,7 @@\n from __future__ import print_function\n \n import glob\n+from itertools import chain\n import os\n import re\n import shutil\n@@ -82,6 +83,8 @@\n \"tune\": [\"tabulate\"],\n }\n \n+extras[\"all\"] = list(set(chain.from_iterable(extras.values())))\n+\n \n class build_ext(_build_ext.build_ext):\n def run(self):\n", "issue": "Kubernetes Docker Container untagged\nThe container used by the kubernetes manifests (rayproject/autoscaler) doesn't have any useful tags. Furthermore the contents of the container are actually built using github.com/edoakes/ray and the container is two months old.\r\n\r\nIt might be better to properly tag the containers with versions and ensure they are built from the main repo (by travis) so that there aren't small differences in the code and it stays up to date.\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport glob\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup, find_packages, Distribution\nimport setuptools.command.build_ext as _build_ext\n\n# Ideally, we could include these files by putting them in a\n# MANIFEST.in or using the package_data argument to setup, but the\n# MANIFEST.in gets applied at the very beginning when setup.py runs\n# before these files have been created, so we have to move the files\n# manually.\n\n# NOTE: The lists below must be kept in sync with ray/BUILD.bazel.\n\nray_files = [\n \"ray/core/src/ray/thirdparty/redis/src/redis-server\",\n \"ray/core/src/ray/gcs/redis_module/libray_redis_module.so\",\n \"ray/core/src/plasma/plasma_store_server\",\n \"ray/_raylet.so\",\n \"ray/core/src/ray/raylet/raylet_monitor\",\n \"ray/core/src/ray/raylet/raylet\",\n \"ray/dashboard/dashboard.py\",\n]\n\n# These are the directories where automatically generated Python protobuf\n# bindings are created.\ngenerated_python_directories = [\n \"ray/core/generated\",\n]\n\noptional_ray_files = []\n\nray_autoscaler_files = [\n \"ray/autoscaler/aws/example-full.yaml\",\n \"ray/autoscaler/gcp/example-full.yaml\",\n \"ray/autoscaler/local/example-full.yaml\",\n \"ray/autoscaler/kubernetes/example-full.yaml\",\n \"ray/autoscaler/kubernetes/kubectl-rsync.sh\",\n]\n\nray_project_files = [\n \"ray/projects/schema.json\", \"ray/projects/templates/cluster_template.yaml\",\n \"ray/projects/templates/project_template.yaml\",\n \"ray/projects/templates/requirements.txt\"\n]\n\nray_dashboard_files = [\n \"ray/dashboard/client/build/favicon.ico\",\n \"ray/dashboard/client/build/index.html\",\n]\nfor dirname in [\"css\", \"js\", \"media\"]:\n ray_dashboard_files += glob.glob(\n \"ray/dashboard/client/build/static/{}/*\".format(dirname))\n\noptional_ray_files += ray_autoscaler_files\noptional_ray_files += ray_project_files\noptional_ray_files += ray_dashboard_files\n\nif \"RAY_USE_NEW_GCS\" in os.environ and os.environ[\"RAY_USE_NEW_GCS\"] == \"on\":\n ray_files += [\n \"ray/core/src/credis/build/src/libmember.so\",\n \"ray/core/src/credis/build/src/libmaster.so\",\n \"ray/core/src/credis/redis/src/redis-server\"\n ]\n\nextras = {\n \"rllib\": [\n \"pyyaml\", \"gym[atari]\", \"opencv-python-headless\", \"lz4\", \"scipy\",\n \"tabulate\"\n ],\n \"debug\": [\"psutil\", \"setproctitle\", \"py-spy >= 0.2.0\"],\n \"dashboard\": [\"aiohttp\", \"google\", \"grpcio\", \"psutil\", 
\"setproctitle\"],\n \"serve\": [\"uvicorn\", \"pygments\", \"werkzeug\", \"flask\", \"pandas\"],\n \"tune\": [\"tabulate\"],\n}\n\n\nclass build_ext(_build_ext.build_ext):\n def run(self):\n # Note: We are passing in sys.executable so that we use the same\n # version of Python to build pyarrow inside the build.sh script. Note\n # that certain flags will not be passed along such as --user or sudo.\n # TODO(rkn): Fix this.\n command = [\"../build.sh\", \"-p\", sys.executable]\n if os.getenv(\"RAY_INSTALL_JAVA\") == \"1\":\n # Also build binaries for Java if the above env variable exists.\n command += [\"-l\", \"python,java\"]\n subprocess.check_call(command)\n\n # We also need to install pyarrow along with Ray, so make sure that the\n # relevant non-Python pyarrow files get copied.\n pyarrow_files = []\n for (root, dirs, filenames) in os.walk(\"./ray/pyarrow_files/pyarrow\"):\n for name in filenames:\n pyarrow_files.append(os.path.join(root, name))\n\n # We also need to install pickle5 along with Ray, so make sure that the\n # relevant non-Python pickle5 files get copied.\n pickle5_files = []\n for (root, dirs, filenames) in os.walk(\"./ray/pickle5_files/pickle5\"):\n for name in filenames:\n pickle5_files.append(os.path.join(root, name))\n\n files_to_include = ray_files + pyarrow_files + pickle5_files\n\n # Copy over the autogenerated protobuf Python bindings.\n for directory in generated_python_directories:\n for filename in os.listdir(directory):\n if filename[-3:] == \".py\":\n files_to_include.append(os.path.join(directory, filename))\n\n for filename in files_to_include:\n self.move_file(filename)\n\n # Try to copy over the optional files.\n for filename in optional_ray_files:\n try:\n self.move_file(filename)\n except Exception:\n print(\"Failed to copy optional file {}. This is ok.\"\n .format(filename))\n\n def move_file(self, filename):\n # TODO(rkn): This feels very brittle. It may not handle all cases. See\n # https://github.com/apache/arrow/blob/master/python/setup.py for an\n # example.\n source = filename\n destination = os.path.join(self.build_lib, filename)\n # Create the target directory if it doesn't already exist.\n parent_directory = os.path.dirname(destination)\n if not os.path.exists(parent_directory):\n os.makedirs(parent_directory)\n if not os.path.exists(destination):\n print(\"Copying {} to {}.\".format(source, destination))\n shutil.copy(source, destination)\n\n\nclass BinaryDistribution(Distribution):\n def has_ext_modules(self):\n return True\n\n\ndef find_version(*filepath):\n # Extract version information from filepath\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *filepath)) as fp:\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n fp.read(), re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = [\n \"numpy >= 1.14\",\n \"filelock\",\n \"jsonschema\",\n \"funcsigs\",\n \"click\",\n \"colorama\",\n \"pytest\",\n \"pyyaml\",\n \"redis>=3.3.2\",\n # NOTE: Don't upgrade the version of six! Doing so causes installation\n # problems. 
See https://github.com/ray-project/ray/issues/4169.\n \"six >= 1.0.0\",\n \"faulthandler;python_version<'3.3'\",\n \"protobuf >= 3.8.0\",\n]\n\nsetup(\n name=\"ray\",\n version=find_version(\"ray\", \"__init__.py\"),\n author=\"Ray Team\",\n author_email=\"[email protected]\",\n description=(\"A system for parallel and distributed Python that unifies \"\n \"the ML ecosystem.\"),\n long_description=open(\"../README.rst\").read(),\n url=\"https://github.com/ray-project/ray\",\n keywords=(\"ray distributed parallel machine-learning \"\n \"reinforcement-learning deep-learning python\"),\n packages=find_packages(),\n cmdclass={\"build_ext\": build_ext},\n # The BinaryDistribution argument triggers build_ext.\n distclass=BinaryDistribution,\n install_requires=requires,\n setup_requires=[\"cython >= 0.29\"],\n extras_require=extras,\n entry_points={\n \"console_scripts\": [\n \"ray=ray.scripts.scripts:main\",\n \"rllib=ray.rllib.scripts:cli [rllib]\", \"tune=ray.tune.scripts:cli\"\n ]\n },\n include_package_data=True,\n zip_safe=False,\n license=\"Apache 2.0\")\n", "path": "python/setup.py"}]} | 2,852 | 115 |
gh_patches_debug_11539 | rasdani/github-patches | git_diff | Pylons__pyramid-2277 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pyobject truncates code at comment
See https://github.com/sphinx-doc/sphinx/issues/2253
Example rendered docs:
http://docs.pylonsproject.org/projects/pyramid/en/latest/quick_tour.html#handling-web-requests-and-responses
rst syntax:
https://github.com/Pylons/pyramid/blame/master/docs/quick_tour.rst#L119-L120
Source code:
https://github.com/Pylons/pyramid/blob/master/docs/quick_tour/requests/app.py#L7
When the bug is fixed and released, we will need to:
- revert the source code sample to use `#` style comments
- bump up the Sphinx version
</issue>
<code>
[start of setup.py]
1 ##############################################################################
2 #
3 # Copyright (c) 2008-2013 Agendaless Consulting and Contributors.
4 # All Rights Reserved.
5 #
6 # This software is subject to the provisions of the BSD-like license at
7 # http://www.repoze.org/LICENSE.txt. A copy of the license should accompany
8 # this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL
9 # EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,
10 # THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND
11 # FITNESS FOR A PARTICULAR PURPOSE
12 #
13 ##############################################################################
14
15 import os
16 import sys
17
18 from setuptools import setup, find_packages
19
20 py_version = sys.version_info[:2]
21 is_pypy = '__pypy__' in sys.builtin_module_names
22
23 PY3 = py_version[0] == 3
24
25 if PY3:
26 if py_version < (3, 3) and not is_pypy: # PyPy3 masquerades as Python 3.2...
27 raise RuntimeError('On Python 3, Pyramid requires Python 3.3 or better')
28 else:
29 if py_version < (2, 6):
30 raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')
31
32 here = os.path.abspath(os.path.dirname(__file__))
33 try:
34 with open(os.path.join(here, 'README.rst')) as f:
35 README = f.read()
36 with open(os.path.join(here, 'CHANGES.txt')) as f:
37 CHANGES = f.read()
38 except IOError:
39 README = CHANGES = ''
40
41 install_requires=[
42 'setuptools',
43 'WebOb >= 1.3.1', # request.domain and CookieProfile
44 'repoze.lru >= 0.4', # py3 compat
45 'zope.interface >= 3.8.0', # has zope.interface.registry
46 'zope.deprecation >= 3.5.0', # py3 compat
47 'venusian >= 1.0a3', # ``ignore``
48 'translationstring >= 0.4', # py3 compat
49 'PasteDeploy >= 1.5.0', # py3 compat
50 ]
51
52 tests_require = [
53 'WebTest >= 1.3.1', # py3 compat
54 ]
55
56 if not PY3:
57 tests_require.append('zope.component>=3.11.0')
58
59 docs_extras = [
60 'Sphinx >= 1.3.4',
61 'docutils',
62 'repoze.sphinx.autointerface',
63 'pylons_sphinx_latesturl',
64 'pylons-sphinx-themes',
65 'sphinxcontrib-programoutput',
66 ]
67
68 testing_extras = tests_require + [
69 'nose',
70 'coverage',
71 'virtualenv', # for scaffolding tests
72 ]
73
74 setup(name='pyramid',
75 version='1.7.dev0',
76 description='The Pyramid Web Framework, a Pylons project',
77 long_description=README + '\n\n' + CHANGES,
78 classifiers=[
79 "Development Status :: 6 - Mature",
80 "Intended Audience :: Developers",
81 "Programming Language :: Python",
82 "Programming Language :: Python :: 2.6",
83 "Programming Language :: Python :: 2.7",
84 "Programming Language :: Python :: 3",
85 "Programming Language :: Python :: 3.3",
86 "Programming Language :: Python :: 3.4",
87 "Programming Language :: Python :: 3.5",
88 "Programming Language :: Python :: Implementation :: CPython",
89 "Programming Language :: Python :: Implementation :: PyPy",
90 "Framework :: Pyramid",
91 "Topic :: Internet :: WWW/HTTP",
92 "Topic :: Internet :: WWW/HTTP :: WSGI",
93 "License :: Repoze Public License",
94 ],
95 keywords='web wsgi pylons pyramid',
96 author="Chris McDonough, Agendaless Consulting",
97 author_email="[email protected]",
98 url="http://docs.pylonsproject.org/en/latest/docs/pyramid.html",
99 license="BSD-derived (http://www.repoze.org/LICENSE.txt)",
100 packages=find_packages(),
101 include_package_data=True,
102 zip_safe=False,
103 install_requires = install_requires,
104 extras_require = {
105 'testing':testing_extras,
106 'docs':docs_extras,
107 },
108 tests_require = tests_require,
109 test_suite="pyramid.tests",
110 entry_points = """\
111 [pyramid.scaffold]
112 starter=pyramid.scaffolds:StarterProjectTemplate
113 zodb=pyramid.scaffolds:ZODBProjectTemplate
114 alchemy=pyramid.scaffolds:AlchemyProjectTemplate
115 [pyramid.pshell_runner]
116 python=pyramid.scripts.pshell:python_shell_runner
117 [console_scripts]
118 pcreate = pyramid.scripts.pcreate:main
119 pserve = pyramid.scripts.pserve:main
120 pshell = pyramid.scripts.pshell:main
121 proutes = pyramid.scripts.proutes:main
122 pviews = pyramid.scripts.pviews:main
123 ptweens = pyramid.scripts.ptweens:main
124 prequest = pyramid.scripts.prequest:main
125 pdistreport = pyramid.scripts.pdistreport:main
126 [paste.server_runner]
127 wsgiref = pyramid.scripts.pserve:wsgiref_server_runner
128 cherrypy = pyramid.scripts.pserve:cherrypy_server_runner
129 """
130 )
131
132
[end of setup.py]
[start of docs/quick_tour/requests/app.py]
1 from wsgiref.simple_server import make_server
2 from pyramid.config import Configurator
3 from pyramid.response import Response
4
5
6 def hello_world(request):
7 """ Some parameters from a request such as /?name=lisa """
8 url = request.url
9 name = request.params.get('name', 'No Name Provided')
10
11 body = 'URL %s with name: %s' % (url, name)
12 return Response(
13 content_type="text/plain",
14 body=body
15 )
16
17
18 if __name__ == '__main__':
19 config = Configurator()
20 config.add_route('hello', '/')
21 config.add_view(hello_world, route_name='hello')
22 app = config.make_wsgi_app()
23 server = make_server('0.0.0.0', 6543, app)
24 server.serve_forever()
25
[end of docs/quick_tour/requests/app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/quick_tour/requests/app.py b/docs/quick_tour/requests/app.py
--- a/docs/quick_tour/requests/app.py
+++ b/docs/quick_tour/requests/app.py
@@ -4,7 +4,7 @@
def hello_world(request):
- """ Some parameters from a request such as /?name=lisa """
+ # Some parameters from a request such as /?name=lisa
url = request.url
name = request.params.get('name', 'No Name Provided')
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -57,7 +57,7 @@
tests_require.append('zope.component>=3.11.0')
docs_extras = [
- 'Sphinx >= 1.3.4',
+ 'Sphinx >= 1.3.5',
'docutils',
'repoze.sphinx.autointerface',
'pylons_sphinx_latesturl',
| {"golden_diff": "diff --git a/docs/quick_tour/requests/app.py b/docs/quick_tour/requests/app.py\n--- a/docs/quick_tour/requests/app.py\n+++ b/docs/quick_tour/requests/app.py\n@@ -4,7 +4,7 @@\n \n \n def hello_world(request):\n- \"\"\" Some parameters from a request such as /?name=lisa \"\"\"\n+ # Some parameters from a request such as /?name=lisa\n url = request.url\n name = request.params.get('name', 'No Name Provided')\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -57,7 +57,7 @@\n tests_require.append('zope.component>=3.11.0')\n \n docs_extras = [\n- 'Sphinx >= 1.3.4',\n+ 'Sphinx >= 1.3.5',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n", "issue": "pyobject truncates code at comment\nSee https://github.com/sphinx-doc/sphinx/issues/2253\n\nExample rendered docs:\nhttp://docs.pylonsproject.org/projects/pyramid/en/latest/quick_tour.html#handling-web-requests-and-responses\n\nrst syntax:\nhttps://github.com/Pylons/pyramid/blame/master/docs/quick_tour.rst#L119-L120\n\nSource code:\nhttps://github.com/Pylons/pyramid/blob/master/docs/quick_tour/requests/app.py#L7\n\nWhen the bug is fixed and released, we will need to:\n- revert the source code sample to use `#` style comments\n- bump up the Sphinx version\n\n", "before_files": [{"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\npy_version = sys.version_info[:2]\nis_pypy = '__pypy__' in sys.builtin_module_names\n\nPY3 = py_version[0] == 3\n\nif PY3:\n if py_version < (3, 3) and not is_pypy: # PyPy3 masquerades as Python 3.2...\n raise RuntimeError('On Python 3, Pyramid requires Python 3.3 or better')\nelse:\n if py_version < (2, 6):\n raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires=[\n 'setuptools',\n 'WebOb >= 1.3.1', # request.domain and CookieProfile\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n ]\n\nif not PY3:\n tests_require.append('zope.component>=3.11.0')\n\ndocs_extras = [\n 'Sphinx >= 1.3.4',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-programoutput',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.7.dev0',\n description='The Pyramid Web Framework, a 
Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Development Status :: 6 - Mature\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"[email protected]\",\n url=\"http://docs.pylonsproject.org/en/latest/docs/pyramid.html\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires = install_requires,\n extras_require = {\n 'testing':testing_extras,\n 'docs':docs_extras,\n },\n tests_require = tests_require,\n test_suite=\"pyramid.tests\",\n entry_points = \"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [pyramid.pshell_runner]\n python=pyramid.scripts.pshell:python_shell_runner\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n\n", "path": "setup.py"}, {"content": "from wsgiref.simple_server import make_server\nfrom pyramid.config import Configurator\nfrom pyramid.response import Response\n\n\ndef hello_world(request):\n \"\"\" Some parameters from a request such as /?name=lisa \"\"\"\n url = request.url\n name = request.params.get('name', 'No Name Provided')\n\n body = 'URL %s with name: %s' % (url, name)\n return Response(\n content_type=\"text/plain\",\n body=body\n )\n\n\nif __name__ == '__main__':\n config = Configurator()\n config.add_route('hello', '/')\n config.add_view(hello_world, route_name='hello')\n app = config.make_wsgi_app()\n server = make_server('0.0.0.0', 6543, app)\n server.serve_forever()\n", "path": "docs/quick_tour/requests/app.py"}]} | 2,391 | 223 |
gh_patches_debug_33519 | rasdani/github-patches | git_diff | TheAlgorithms__Python-10121 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve our test coverage
### Feature description
Many of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.
### How to find low-coverage files
Go to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under "Run Tests" and scroll down until you find the section on code coverage:
```
---------- coverage: platform linux, python 3.12.0-final-0 -----------
Name Stmts Miss Cover Missing
-----------------------------------------------------------------------------------------------------------
quantum/q_fourier_transform.py 30 30 0% 14-93
scripts/validate_solutions.py 54 54 0% 2-94
strings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129
...
```
The "Cover" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.
Some files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.
_**When you open your PR, put "Contributes to #9943" in the PR description.**_ Do not use the word "fixes", "resolves", or "closes". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.
### How to add doctests
A doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:
```py
def add(a: int, b: int) -> int:
"""
Adds two non-negative numbers.
>>> add(1, 1)
2
>>> add(2, 5)
7
>>> add(1, 0)
1
>>> add(-1, -1)
    Traceback (most recent call last):
...
ValueError: Numbers must be non-negative
"""
```
For every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).
Do not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.
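If you want to check your doctests locally before opening a PR, one convenient pattern (many files in this repository already end with a similar block, but treat the exact wording here as an illustrative sketch rather than a required addition) is to put a small `doctest` runner at the bottom of the file and run the file directly:
```py
if __name__ == "__main__":
    import doctest

    # Runs every doctest found in this file's docstrings and reports failures.
    results = doctest.testmod(verbose=True)
    print(f"{results.attempted} doctests attempted, {results.failed} failed")
```
Running the file this way gives you essentially the same pass/fail information you would otherwise only see in the CI logs, so you can catch mistakes before pushing.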
_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_
</issue>
<code>
[start of maths/binary_exponentiation_2.py]
1 """
2 * Binary Exponentiation for Powers
3 * This is a method to find a^b in a time complexity of O(log b)
4 * This is one of the most commonly used methods of finding powers.
5 * Also useful in cases where solution to (a^b)%c is required,
6 * where a,b,c can be numbers over the computers calculation limits.
7 * Done using iteration, can also be done using recursion
8
9 * @author chinmoy159
10 * @version 1.0 dated 10/08/2017
11 """
12
13
14 def b_expo(a: int, b: int) -> int:
15 res = 1
16 while b > 0:
17 if b & 1:
18 res *= a
19
20 a *= a
21 b >>= 1
22
23 return res
24
25
26 def b_expo_mod(a: int, b: int, c: int) -> int:
27 res = 1
28 while b > 0:
29 if b & 1:
30 res = ((res % c) * (a % c)) % c
31
32 a *= a
33 b >>= 1
34
35 return res
36
37
38 """
39 * Wondering how this method works !
40 * It's pretty simple.
41 * Let's say you need to calculate a ^ b
42 * RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2
43 * RULE 2 : IF b is ODD, then ---- a ^ b = a * (a ^ (b - 1)) :: where (b - 1) is even.
44 * Once b is even, repeat the process to get a ^ b
45 * Repeat the process till b = 1 OR b = 0, because a^1 = a AND a^0 = 1
46 *
47 * As far as the modulo is concerned,
48 * the fact : (a*b) % c = ((a%c) * (b%c)) % c
49 * Now apply RULE 1 OR 2 whichever is required.
50 """
51
[end of maths/binary_exponentiation_2.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/maths/binary_exponentiation_2.py b/maths/binary_exponentiation_2.py
--- a/maths/binary_exponentiation_2.py
+++ b/maths/binary_exponentiation_2.py
@@ -1,17 +1,33 @@
"""
-* Binary Exponentiation for Powers
-* This is a method to find a^b in a time complexity of O(log b)
-* This is one of the most commonly used methods of finding powers.
-* Also useful in cases where solution to (a^b)%c is required,
-* where a,b,c can be numbers over the computers calculation limits.
-* Done using iteration, can also be done using recursion
-
-* @author chinmoy159
-* @version 1.0 dated 10/08/2017
+Binary Exponentiation
+This is a method to find a^b in O(log b) time complexity
+This is one of the most commonly used methods of exponentiation
+It's also useful when the solution to (a^b) % c is required because a, b, c may be
+over the computer's calculation limits
+
+Let's say you need to calculate a ^ b
+- RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2
+- RULE 2 : IF b is odd, then a ^ b = a * (a ^ (b - 1)), where b - 1 is even
+Once b is even, repeat the process until b = 1 or b = 0, because a^1 = a and a^0 = 1
+
+For modular exponentiation, we use the fact that (a*b) % c = ((a%c) * (b%c)) % c
+Now apply RULE 1 or 2 as required
+
+@author chinmoy159
"""
def b_expo(a: int, b: int) -> int:
+ """
+ >>> b_expo(2, 10)
+ 1024
+ >>> b_expo(9, 0)
+ 1
+ >>> b_expo(0, 12)
+ 0
+ >>> b_expo(4, 12)
+ 16777216
+ """
res = 1
while b > 0:
if b & 1:
@@ -24,6 +40,16 @@
def b_expo_mod(a: int, b: int, c: int) -> int:
+ """
+ >>> b_expo_mod(2, 10, 1000000007)
+ 1024
+ >>> b_expo_mod(11, 13, 19)
+ 11
+ >>> b_expo_mod(0, 19, 20)
+ 0
+ >>> b_expo_mod(15, 5, 4)
+ 3
+ """
res = 1
while b > 0:
if b & 1:
@@ -33,18 +59,3 @@
b >>= 1
return res
-
-
-"""
-* Wondering how this method works !
-* It's pretty simple.
-* Let's say you need to calculate a ^ b
-* RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2
-* RULE 2 : IF b is ODD, then ---- a ^ b = a * (a ^ (b - 1)) :: where (b - 1) is even.
-* Once b is even, repeat the process to get a ^ b
-* Repeat the process till b = 1 OR b = 0, because a^1 = a AND a^0 = 1
-*
-* As far as the modulo is concerned,
-* the fact : (a*b) % c = ((a%c) * (b%c)) % c
-* Now apply RULE 1 OR 2 whichever is required.
-"""
| {"golden_diff": "diff --git a/maths/binary_exponentiation_2.py b/maths/binary_exponentiation_2.py\n--- a/maths/binary_exponentiation_2.py\n+++ b/maths/binary_exponentiation_2.py\n@@ -1,17 +1,33 @@\n \"\"\"\n-* Binary Exponentiation for Powers\n-* This is a method to find a^b in a time complexity of O(log b)\n-* This is one of the most commonly used methods of finding powers.\n-* Also useful in cases where solution to (a^b)%c is required,\n-* where a,b,c can be numbers over the computers calculation limits.\n-* Done using iteration, can also be done using recursion\n-\n-* @author chinmoy159\n-* @version 1.0 dated 10/08/2017\n+Binary Exponentiation\n+This is a method to find a^b in O(log b) time complexity\n+This is one of the most commonly used methods of exponentiation\n+It's also useful when the solution to (a^b) % c is required because a, b, c may be\n+over the computer's calculation limits\n+\n+Let's say you need to calculate a ^ b\n+- RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2\n+- RULE 2 : IF b is odd, then a ^ b = a * (a ^ (b - 1)), where b - 1 is even\n+Once b is even, repeat the process until b = 1 or b = 0, because a^1 = a and a^0 = 1\n+\n+For modular exponentiation, we use the fact that (a*b) % c = ((a%c) * (b%c)) % c\n+Now apply RULE 1 or 2 as required\n+\n+@author chinmoy159\n \"\"\"\n \n \n def b_expo(a: int, b: int) -> int:\n+ \"\"\"\n+ >>> b_expo(2, 10)\n+ 1024\n+ >>> b_expo(9, 0)\n+ 1\n+ >>> b_expo(0, 12)\n+ 0\n+ >>> b_expo(4, 12)\n+ 16777216\n+ \"\"\"\n res = 1\n while b > 0:\n if b & 1:\n@@ -24,6 +40,16 @@\n \n \n def b_expo_mod(a: int, b: int, c: int) -> int:\n+ \"\"\"\n+ >>> b_expo_mod(2, 10, 1000000007)\n+ 1024\n+ >>> b_expo_mod(11, 13, 19)\n+ 11\n+ >>> b_expo_mod(0, 19, 20)\n+ 0\n+ >>> b_expo_mod(15, 5, 4)\n+ 3\n+ \"\"\"\n res = 1\n while b > 0:\n if b & 1:\n@@ -33,18 +59,3 @@\n b >>= 1\n \n return res\n-\n-\n-\"\"\"\n-* Wondering how this method works !\n-* It's pretty simple.\n-* Let's say you need to calculate a ^ b\n-* RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2\n-* RULE 2 : IF b is ODD, then ---- a ^ b = a * (a ^ (b - 1)) :: where (b - 1) is even.\n-* Once b is even, repeat the process to get a ^ b\n-* Repeat the process till b = 1 OR b = 0, because a^1 = a AND a^0 = 1\n-*\n-* As far as the modulo is concerned,\n-* the fact : (a*b) % c = ((a%c) * (b%c)) % c\n-* Now apply RULE 1 OR 2 whichever is required.\n-\"\"\"\n", "issue": "Improve our test coverage\n### Feature description\r\n\r\nMany of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.\r\n\r\n### How to find low-coverage files\r\n\r\nGo to the Actions tab in this repository and find the most recent **build** workflow run. 
Open the logs under \"Run Tests\" and scroll down until you find the section on code coverage:\r\n```\r\n---------- coverage: platform linux, python 3.12.0-final-0 -----------\r\nName Stmts Miss Cover Missing\r\n-----------------------------------------------------------------------------------------------------------\r\nquantum/q_fourier_transform.py 30 30 0% 14-93\r\nscripts/validate_solutions.py 54 54 0% 2-94\r\nstrings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129\r\n...\r\n```\r\nThe \"Cover\" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.\r\n\r\nSome files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.\r\n\r\n_**When you open your PR, put \"Contributes to #9943\" in the PR description.**_ Do not use the word \"fixes\", \"resolves\", or \"closes\". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.\r\n\r\n### How to add doctests\r\n\r\nA doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:\r\n```py\r\ndef add(a: int, b: int) -> int:\r\n \"\"\"\r\n Adds two non-negative numbers.\r\n >>> add(1, 1)\r\n 2\r\n >>> add(2, 5)\r\n 7\r\n >>> add(1, 0)\r\n 1\r\n >>> add(-1, -1)\r\n Traceback (most recent last):\r\n ...\r\n ValueError: Numbers must be non-negative\r\n \"\"\"\r\n```\r\nFor every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).\r\n\r\nDo not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. 
**Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.\r\n\r\n_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_\n", "before_files": [{"content": "\"\"\"\n* Binary Exponentiation for Powers\n* This is a method to find a^b in a time complexity of O(log b)\n* This is one of the most commonly used methods of finding powers.\n* Also useful in cases where solution to (a^b)%c is required,\n* where a,b,c can be numbers over the computers calculation limits.\n* Done using iteration, can also be done using recursion\n\n* @author chinmoy159\n* @version 1.0 dated 10/08/2017\n\"\"\"\n\n\ndef b_expo(a: int, b: int) -> int:\n res = 1\n while b > 0:\n if b & 1:\n res *= a\n\n a *= a\n b >>= 1\n\n return res\n\n\ndef b_expo_mod(a: int, b: int, c: int) -> int:\n res = 1\n while b > 0:\n if b & 1:\n res = ((res % c) * (a % c)) % c\n\n a *= a\n b >>= 1\n\n return res\n\n\n\"\"\"\n* Wondering how this method works !\n* It's pretty simple.\n* Let's say you need to calculate a ^ b\n* RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2\n* RULE 2 : IF b is ODD, then ---- a ^ b = a * (a ^ (b - 1)) :: where (b - 1) is even.\n* Once b is even, repeat the process to get a ^ b\n* Repeat the process till b = 1 OR b = 0, because a^1 = a AND a^0 = 1\n*\n* As far as the modulo is concerned,\n* the fact : (a*b) % c = ((a%c) * (b%c)) % c\n* Now apply RULE 1 OR 2 whichever is required.\n\"\"\"\n", "path": "maths/binary_exponentiation_2.py"}]} | 1,935 | 955 |
gh_patches_debug_21356 | rasdani/github-patches | git_diff | pymodbus-dev__pymodbus-1459 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pymodbus.simulator ignores command line arguments "http_port" and "json_file"
### Versions
- Python: 3.10.10
- OS: Fedora 36
- Pymodbus: 3.2.2
### Pymodbus Specific
- Server: tcp
- Client: tcp
### Description
- when starting pymodbus.simulator with a specific json file, an error stating a different filename is shown
```
$ pymodbus.simulator --json_file /tmp/setup.json
2023-03-25 20:06:03,532 INFO logging:96 Start simulator
Traceback (most recent call last):
File "/usr/bin/pymodbus.simulator", line 8, in <module>
sys.exit(main())
File "/usr/lib/python3.10/site-packages/pymodbus/server/simulator/main.py", line 119, in main
asyncio.run(run_main(), debug=True)
File "/usr/lib64/python3.10/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/usr/lib64/python3.10/asyncio/base_events.py", line 649, in run_until_complete
return future.result()
File "/usr/lib/python3.10/site-packages/pymodbus/server/simulator/main.py", line 113, in run_main
task = ModbusSimulatorServer(**cmd_args)
File "/usr/lib/python3.10/site-packages/pymodbus/server/simulator/http_server.py", line 130, in __init__
with open(json_file, encoding="utf-8") as file:
FileNotFoundError: [Errno 2] No such file or directory: './pymodbus/server/simulator/setup.json'
```
- the problem is in https://github.com/pymodbus-dev/pymodbus/blob/c2db53cad3c60834bffecd23db699f6982f914fb/pymodbus/server/simulator/main.py#L113 where the command line arguments are overwritten
- if the intention is to have default values, a patch keeping exactly these defaults would look like this:
```
diff --git a/pymodbus/server/simulator/main.py b/pymodbus/server/simulator/main.py
index 09af492..443e1a6 100755
--- a/pymodbus/server/simulator/main.py
+++ b/pymodbus/server/simulator/main.py
@@ -70,6 +70,7 @@ def get_commandline():
parser.add_argument(
"--http_port",
help="use <http_port> as port to bind http listen",
+ default=8081,
type=str,
)
parser.add_argument(
@@ -82,6 +83,7 @@ def get_commandline():
parser.add_argument(
"--json_file",
help='name of json file, default is "setup.json"',
+ default="./pymodbus/server/simulator/setup.json",
type=str,
)
parser.add_argument(
@@ -109,8 +111,6 @@ def get_commandline():
async def run_main():
"""Run server async."""
cmd_args = get_commandline()
- cmd_args["http_port"] = 8081
- cmd_args["json_file"] = "./pymodbus/server/simulator/setup.json"
task = ModbusSimulatorServer(**cmd_args)
await task.run_forever()
```
- however, for running pymodbus.simulator after installing without any arguments, I would suggest the following:
- install the default setup.json e.g. into the same directory as main.py of the simulator
- use a diff like this:
```
diff --git a/pymodbus/server/simulator/main.py b/pymodbus/server/simulator/main.py
index 09af492..086d724 100755
--- a/pymodbus/server/simulator/main.py
+++ b/pymodbus/server/simulator/main.py
@@ -41,6 +41,7 @@ options:
"""
import argparse
import asyncio
+import os
from pymodbus import pymodbus_apply_logging_config
from pymodbus.logging import Log
@@ -70,6 +71,7 @@ def get_commandline():
parser.add_argument(
"--http_port",
help="use <http_port> as port to bind http listen",
+ default=8081,
type=str,
)
parser.add_argument(
@@ -82,6 +84,7 @@ def get_commandline():
parser.add_argument(
"--json_file",
help='name of json file, default is "setup.json"',
+ default=os.path.join(os.path.dirname(__file__), "setup.json"),
type=str,
)
parser.add_argument(
@@ -109,8 +112,6 @@ def get_commandline():
async def run_main():
"""Run server async."""
cmd_args = get_commandline()
- cmd_args["http_port"] = 8081
- cmd_args["json_file"] = "./pymodbus/server/simulator/setup.json"
task = ModbusSimulatorServer(**cmd_args)
await task.run_forever()
```
- please provide some guidance on which way to go, then I'll create a corresponding pull request
</issue>
<code>
[start of pymodbus/server/simulator/main.py]
1 #!/usr/bin/env python3
2 """HTTP server for modbus simulator.
3
4 The modbus simulator contains 3 distinct parts:
5
6 - Datastore simulator, to define registers and their behaviour including actions: (simulator)(../../datastore/simulator.py)
7 - Modbus server: (server)(./http_server.py)
8 - HTTP server with REST API and web pages providing an online console in your browser
9
10 Multiple setups for different server types and/or devices are prepared in a (json file)(./setup.json), the detailed configuration is explained in (doc)(README.md)
11
12 The command line parameters are kept to a minimum:
13
14 usage: main.py [-h] [--modbus_server MODBUS_SERVER]
15 [--modbus_device MODBUS_DEVICE] [--http_host HTTP_HOST]
16 [--http_port HTTP_PORT]
17 [--log {critical,error,warning,info,debug}]
18 [--json_file JSON_FILE]
19 [--custom_actions_module CUSTOM_ACTIONS_MODULE]
20
21 Modbus server with REST-API and web server
22
23 options:
24 -h, --help show this help message and exit
25 --modbus_server MODBUS_SERVER
26 use <modbus_server> from server_list in json file
27 --modbus_device MODBUS_DEVICE
28 use <modbus_device> from device_list in json file
29 --http_host HTTP_HOST
30 use <http_host> as host to bind http listen
31 --http_port HTTP_PORT
32 use <http_port> as port to bind http listen
33 --log {critical,error,warning,info,debug}
34 set log level, default is info
35 --log_file LOG_FILE
36 name of server log file, default is "server.log"
37 --json_file JSON_FILE
38 name of json_file, default is "setup.json"
39 --custom_actions_module CUSTOM_ACTIONS_MODULE
40 python file with custom actions, default is none
41 """
42 import argparse
43 import asyncio
44
45 from pymodbus import pymodbus_apply_logging_config
46 from pymodbus.logging import Log
47 from pymodbus.server.simulator.http_server import ModbusSimulatorServer
48
49
50 def get_commandline():
51 """Get command line arguments."""
52 parser = argparse.ArgumentParser(
53 description="Modbus server with REST-API and web server"
54 )
55 parser.add_argument(
56 "--modbus_server",
57 help="use <modbus_server> from server_list in json file",
58 type=str,
59 )
60 parser.add_argument(
61 "--modbus_device",
62 help="use <modbus_device> from device_list in json file",
63 type=str,
64 )
65 parser.add_argument(
66 "--http_host",
67 help="use <http_host> as host to bind http listen",
68 type=str,
69 )
70 parser.add_argument(
71 "--http_port",
72 help="use <http_port> as port to bind http listen",
73 type=str,
74 )
75 parser.add_argument(
76 "--log",
77 choices=["critical", "error", "warning", "info", "debug"],
78 help="set log level, default is info",
79 default="info",
80 type=str,
81 )
82 parser.add_argument(
83 "--json_file",
84 help='name of json file, default is "setup.json"',
85 type=str,
86 )
87 parser.add_argument(
88 "--log_file",
89 help='name of server log file, default is "server.log"',
90 type=str,
91 )
92 parser.add_argument(
93 "--custom_actions_module",
94 help="python file with custom actions, default is none",
95 type=str,
96 )
97 args = parser.parse_args()
98 pymodbus_apply_logging_config(args.log.upper())
99 Log.info("Start simulator")
100 cmd_args = {}
101 for argument in args.__dict__:
102 if argument == "log":
103 continue
104 if args.__dict__[argument] is not None:
105 cmd_args[argument] = args.__dict__[argument]
106 return cmd_args
107
108
109 async def run_main():
110 """Run server async."""
111 cmd_args = get_commandline()
112 cmd_args["http_port"] = 8081
113 cmd_args["json_file"] = "./pymodbus/server/simulator/setup.json"
114 task = ModbusSimulatorServer(**cmd_args)
115 await task.run_forever()
116
117
118 def main():
119 """Run server."""
120 asyncio.run(run_main(), debug=True)
121
122
123 if __name__ == "__main__":
124 main()
125
[end of pymodbus/server/simulator/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pymodbus/server/simulator/main.py b/pymodbus/server/simulator/main.py
--- a/pymodbus/server/simulator/main.py
+++ b/pymodbus/server/simulator/main.py
@@ -66,6 +66,7 @@
"--http_host",
help="use <http_host> as host to bind http listen",
type=str,
+ default=8081,
)
parser.add_argument(
"--http_port",
@@ -83,6 +84,7 @@
"--json_file",
help='name of json file, default is "setup.json"',
type=str,
+ default="./pymodbus/server/simulator/setup.json",
)
parser.add_argument(
"--log_file",
@@ -109,8 +111,6 @@
async def run_main():
"""Run server async."""
cmd_args = get_commandline()
- cmd_args["http_port"] = 8081
- cmd_args["json_file"] = "./pymodbus/server/simulator/setup.json"
task = ModbusSimulatorServer(**cmd_args)
await task.run_forever()
| {"golden_diff": "diff --git a/pymodbus/server/simulator/main.py b/pymodbus/server/simulator/main.py\n--- a/pymodbus/server/simulator/main.py\n+++ b/pymodbus/server/simulator/main.py\n@@ -66,6 +66,7 @@\n \"--http_host\",\n help=\"use <http_host> as host to bind http listen\",\n type=str,\n+ default=8081,\n )\n parser.add_argument(\n \"--http_port\",\n@@ -83,6 +84,7 @@\n \"--json_file\",\n help='name of json file, default is \"setup.json\"',\n type=str,\n+ default=\"./pymodbus/server/simulator/setup.json\",\n )\n parser.add_argument(\n \"--log_file\",\n@@ -109,8 +111,6 @@\n async def run_main():\n \"\"\"Run server async.\"\"\"\n cmd_args = get_commandline()\n- cmd_args[\"http_port\"] = 8081\n- cmd_args[\"json_file\"] = \"./pymodbus/server/simulator/setup.json\"\n task = ModbusSimulatorServer(**cmd_args)\n await task.run_forever()\n", "issue": "pymodbus.simulator ignores command line arguments \"http_port\" and \"json_file\"\n### Versions\r\n\r\n- Python: 3.10.10\r\n- OS: Fedora 36\r\n- Pymodbus: 3.2.2\r\n\r\n### Pymodbus Specific\r\n\r\n- Server: tcp\r\n- Client: tcp\r\n\r\n### Description\r\n\r\n- when starting pymodbus.simulator with a specific json file, an error stating a different filename is shown\r\n```\r\n$ pymodbus.simulator --json_file /tmp/setup.json \r\n2023-03-25 20:06:03,532 INFO logging:96 Start simulator\r\nTraceback (most recent call last):\r\n File \"/usr/bin/pymodbus.simulator\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/usr/lib/python3.10/site-packages/pymodbus/server/simulator/main.py\", line 119, in main\r\n asyncio.run(run_main(), debug=True)\r\n File \"/usr/lib64/python3.10/asyncio/runners.py\", line 44, in run\r\n return loop.run_until_complete(main)\r\n File \"/usr/lib64/python3.10/asyncio/base_events.py\", line 649, in run_until_complete\r\n return future.result()\r\n File \"/usr/lib/python3.10/site-packages/pymodbus/server/simulator/main.py\", line 113, in run_main\r\n task = ModbusSimulatorServer(**cmd_args)\r\n File \"/usr/lib/python3.10/site-packages/pymodbus/server/simulator/http_server.py\", line 130, in __init__\r\n with open(json_file, encoding=\"utf-8\") as file:\r\nFileNotFoundError: [Errno 2] No such file or directory: './pymodbus/server/simulator/setup.json'\r\n```\r\n- the problem is in https://github.com/pymodbus-dev/pymodbus/blob/c2db53cad3c60834bffecd23db699f6982f914fb/pymodbus/server/simulator/main.py#L113 where the command line arguments are overwritten\r\n- if it is the intention having default values, a patch keeping exactly these defaults would look like this:\r\n```\r\ndiff --git a/pymodbus/server/simulator/main.py b/pymodbus/server/simulator/main.py\r\nindex 09af492..443e1a6 100755\r\n--- a/pymodbus/server/simulator/main.py\r\n+++ b/pymodbus/server/simulator/main.py\r\n@@ -70,6 +70,7 @@ def get_commandline():\r\n parser.add_argument(\r\n \"--http_port\",\r\n help=\"use <http_port> as port to bind http listen\",\r\n+ default=8081,\r\n type=str,\r\n )\r\n parser.add_argument(\r\n@@ -82,6 +83,7 @@ def get_commandline():\r\n parser.add_argument(\r\n \"--json_file\",\r\n help='name of json file, default is \"setup.json\"',\r\n+ default=\"./pymodbus/server/simulator/setup.json\",\r\n type=str,\r\n )\r\n parser.add_argument(\r\n@@ -109,8 +111,6 @@ def get_commandline():\r\n async def run_main():\r\n \"\"\"Run server async.\"\"\"\r\n cmd_args = get_commandline()\r\n- cmd_args[\"http_port\"] = 8081\r\n- cmd_args[\"json_file\"] = \"./pymodbus/server/simulator/setup.json\"\r\n task = ModbusSimulatorServer(**cmd_args)\r\n await 
task.run_forever()\r\n ```\r\n- however, for running pymodbus.simulator after installing without any arguments, I would suggest the following:\r\n - install the default setup.json e.g. into the same directory as main.py of the simulator\r\n - use a diff like this:\r\n```\r\ndiff --git a/pymodbus/server/simulator/main.py b/pymodbus/server/simulator/main.py\r\nindex 09af492..086d724 100755\r\n--- a/pymodbus/server/simulator/main.py\r\n+++ b/pymodbus/server/simulator/main.py\r\n@@ -41,6 +41,7 @@ options:\r\n \"\"\"\r\n import argparse\r\n import asyncio\r\n+import os\r\n \r\n from pymodbus import pymodbus_apply_logging_config\r\n from pymodbus.logging import Log\r\n@@ -70,6 +71,7 @@ def get_commandline():\r\n parser.add_argument(\r\n \"--http_port\",\r\n help=\"use <http_port> as port to bind http listen\",\r\n+ default=8081,\r\n type=str,\r\n )\r\n parser.add_argument(\r\n@@ -82,6 +84,7 @@ def get_commandline():\r\n parser.add_argument(\r\n \"--json_file\",\r\n help='name of json file, default is \"setup.json\"',\r\n+ default=os.path.join(os.path.dirname(__file__), \"setup.json\"),\r\n type=str,\r\n )\r\n parser.add_argument(\r\n@@ -109,8 +112,6 @@ def get_commandline():\r\n async def run_main():\r\n \"\"\"Run server async.\"\"\"\r\n cmd_args = get_commandline()\r\n- cmd_args[\"http_port\"] = 8081\r\n- cmd_args[\"json_file\"] = \"./pymodbus/server/simulator/setup.json\"\r\n task = ModbusSimulatorServer(**cmd_args)\r\n await task.run_forever()\r\n \r\n```\r\n- please provide some guidance which way to go, then I'll create an according pull request\n", "before_files": [{"content": "#!/usr/bin/env python3\n\"\"\"HTTP server for modbus simulator.\n\nThe modbus simulator contain 3 distint parts:\n\n- Datastore simulator, to define registers and their behaviour including actions: (simulator)(../../datastore/simulator.py)\n- Modbus server: (server)(./http_server.py)\n- HTTP server with REST API and web pages providing an online console in your browser\n\nMultiple setups for different server types and/or devices are prepared in a (json file)(./setup.json), the detailed configuration is explained in (doc)(README.md)\n\nThe command line parameters are kept to a minimum:\n\nusage: main.py [-h] [--modbus_server MODBUS_SERVER]\n [--modbus_device MODBUS_DEVICE] [--http_host HTTP_HOST]\n [--http_port HTTP_PORT]\n [--log {critical,error,warning,info,debug}]\n [--json_file JSON_FILE]\n [--custom_actions_module CUSTOM_ACTIONS_MODULE]\n\nModbus server with REST-API and web server\n\noptions:\n -h, --help show this help message and exit\n --modbus_server MODBUS_SERVER\n use <modbus_server> from server_list in json file\n --modbus_device MODBUS_DEVICE\n use <modbus_device> from device_list in json file\n --http_host HTTP_HOST\n use <http_host> as host to bind http listen\n --http_port HTTP_PORT\n use <http_port> as port to bind http listen\n --log {critical,error,warning,info,debug}\n set log level, default is info\n --log_file LOG_FILE\n name of server log file, default is \"server.log\"\n --json_file JSON_FILE\n name of json_file, default is \"setup.json\"\n --custom_actions_module CUSTOM_ACTIONS_MODULE\n python file with custom actions, default is none\n\"\"\"\nimport argparse\nimport asyncio\n\nfrom pymodbus import pymodbus_apply_logging_config\nfrom pymodbus.logging import Log\nfrom pymodbus.server.simulator.http_server import ModbusSimulatorServer\n\n\ndef get_commandline():\n \"\"\"Get command line arguments.\"\"\"\n parser = argparse.ArgumentParser(\n description=\"Modbus server with REST-API and web 
server\"\n )\n parser.add_argument(\n \"--modbus_server\",\n help=\"use <modbus_server> from server_list in json file\",\n type=str,\n )\n parser.add_argument(\n \"--modbus_device\",\n help=\"use <modbus_device> from device_list in json file\",\n type=str,\n )\n parser.add_argument(\n \"--http_host\",\n help=\"use <http_host> as host to bind http listen\",\n type=str,\n )\n parser.add_argument(\n \"--http_port\",\n help=\"use <http_port> as port to bind http listen\",\n type=str,\n )\n parser.add_argument(\n \"--log\",\n choices=[\"critical\", \"error\", \"warning\", \"info\", \"debug\"],\n help=\"set log level, default is info\",\n default=\"info\",\n type=str,\n )\n parser.add_argument(\n \"--json_file\",\n help='name of json file, default is \"setup.json\"',\n type=str,\n )\n parser.add_argument(\n \"--log_file\",\n help='name of server log file, default is \"server.log\"',\n type=str,\n )\n parser.add_argument(\n \"--custom_actions_module\",\n help=\"python file with custom actions, default is none\",\n type=str,\n )\n args = parser.parse_args()\n pymodbus_apply_logging_config(args.log.upper())\n Log.info(\"Start simulator\")\n cmd_args = {}\n for argument in args.__dict__:\n if argument == \"log\":\n continue\n if args.__dict__[argument] is not None:\n cmd_args[argument] = args.__dict__[argument]\n return cmd_args\n\n\nasync def run_main():\n \"\"\"Run server async.\"\"\"\n cmd_args = get_commandline()\n cmd_args[\"http_port\"] = 8081\n cmd_args[\"json_file\"] = \"./pymodbus/server/simulator/setup.json\"\n task = ModbusSimulatorServer(**cmd_args)\n await task.run_forever()\n\n\ndef main():\n \"\"\"Run server.\"\"\"\n asyncio.run(run_main(), debug=True)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "pymodbus/server/simulator/main.py"}]} | 2,945 | 253 |
gh_patches_debug_25843 | rasdani/github-patches | git_diff | fidals__shopelectro-965 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create a custom page for 500 error
The page should have an apology and a suggestion to continue the purchase with the help of a consultant.
</issue>
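For orientation, a custom 500 page in a Django project of this kind usually amounts to a `500.html` template (carrying the apology and the consultant contact) that the default `handler500` renders, plus debug-only routes so the page can be previewed. A minimal sketch, with the template names and test URLs as illustrative assumptions rather than code from the repository:

```python
# Hypothetical addition to the bottom of a urls.py such as the one shown below.
# In production Django serves templates/500.html automatically for unhandled errors;
# these routes only make the pages reachable for preview/testing while DEBUG is on.
from django.conf import settings
from django.conf.urls import url
from django.views.generic import TemplateView

if settings.DEBUG:
    urlpatterns += [  # assumes urlpatterns is already defined in this module
        url(r'^404/$', TemplateView.as_view(template_name='404.html')),
        url(r'^500/$', TemplateView.as_view(template_name='500.html')),
    ]
```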
<code>
[start of shopelectro/urls.py]
1 from datetime import timedelta
2 from collections import OrderedDict
3
4 from django.conf import settings
5 from django.conf.urls import url, include
6 from django.conf.urls.static import static
7 from django.contrib.sitemaps.views import sitemap
8 from django.views.decorators.cache import cache_page, never_cache
9
10 from pages.views import RobotsView, SitemapPage
11 from pages.urls import custom_page_url
12
13 from shopelectro import sitemaps, views
14 from shopelectro.admin import se_admin
15
16
17 def cached_time(*args, **kwargs) -> int:
18 """Return value of time for caching in seconds."""
19 return int(timedelta(*args, **kwargs).total_seconds())
20
21
22 # Orders sitemaps instances
23 sitemaps = OrderedDict([
24 ('index', sitemaps.IndexSitemap),
25 ('category', sitemaps.CategorySitemap),
26 ('category-with-tags', sitemaps.CategoryWithTagsSitemap),
27 ('products', sitemaps.ProductSitemap),
28 ('site', sitemaps.PagesSitemap)
29 ])
30
31 # disable cache
32 if settings.DEBUG:
33 def cache_page(arg): # Ignore PyFlakesBear
34 if callable(arg):
35 return arg
36 return cache_page
37
38 cached_60d = cache_page(cached_time(days=60))
39 cached_2h = cache_page(cached_time(hours=2))
40
41 admin_urls = [
42 url(r'^', se_admin.urls),
43 url(r'^autocomplete/$', views.AdminAutocomplete.as_view(), name='admin_autocomplete'),
44 url(r'^get-tree-items/$', views.Tree.as_view()),
45 url(r'^redirect-to-product/$', views.RedirectToProduct.as_view()),
46 url(r'^table-editor-api/$', views.TableEditorAPI.as_view()),
47 url(r'^select2/', include('django_select2.urls')),
48 ]
49
50 catalog_urls = [
51 # "category" group
52 url(r'^categories/(?P<slug>[\w-]+)/$',
53 cached_2h(views.CategoryPage.as_view()), name='category'),
54 url(r'^categories/(?P<slug>[\w-]+)/tags/(?P<tags>[\w_-]+)/$',
55 cached_2h(views.CategoryPage.as_view()), name='category'),
56 url(r'^categories/(?P<slug>[\w-]+)/(?P<sorting>[0-9]*)/$',
57 views.CategoryPage.as_view(), name='category'),
58 url(r'^categories/(?P<slug>[\w-]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\w_-]+)/$',
59 views.CategoryPage.as_view(), name='category'),
60 # "load more" group
61 url(r'categories/(?P<slug>[\w-]+)/load-more/'
62 r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/$',
63 views.load_more, name='load_more'),
64 url(r'categories/(?P<slug>[\w-]+)/load-more/'
65 r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\w_-]+)/$',
66 views.load_more, name='load_more'),
67 # rest of urls
68 url(r'^no-images/$', views.ProductsWithoutImages.as_view(),
69 name='products_without_images'),
70 url(r'^no-text/$', views.ProductsWithoutText.as_view(),
71 name='products_without_text'),
72 url(r'^products/(?P<product_vendor_code>[0-9]+)/$',
73 views.ProductPage.as_view(), name='product'),
74 ]
75
76 service_urls = [
77 url(r'^ya-kassa/aviso/$', views.yandex_aviso, name='yandex_aviso'),
78 url(r'^ya-kassa/check/$', views.yandex_check, name='yandex_check'),
79 url(r'^ya-feedback/redirect/$',
80 views.ya_feedback_with_redirect, name='ya_feedback_with_redirect'),
81 url(r'^ya-feedback/request/$',
82 views.ya_feedback_request, name='ya_feedback_request'),
83 ]
84
85 search_urls = [
86 url(r'^autocomplete/$', views.Autocomplete.as_view(), name='autocomplete'),
87 ]
88
89 ecommerce_urls = [
90 url(r'^cart-get/$', never_cache(views.Cart.as_view()), name='cart_get'),
91 url(r'^cart-add/$', views.AddToCart.as_view(), name='cart_add'),
92 url(r'^cart-change/$', views.ChangeCount.as_view(), name='cart_set_count'),
93 url(r'^cart-flush/$', views.FlushCart.as_view(), name='cart_flush'),
94 url(r'^cart-remove/$', views.RemoveFromCart.as_view(), name='cart_remove'),
95 url(r'^order-call/$', views.order_call),
96 url(r'^one-click-buy/$', views.one_click_buy),
97 url(r'^yandex-order/$', views.YandexOrder.as_view()),
98 url(r'', include('ecommerce.urls')),
99 ]
100
101 custom_pages = [
102 # can't use just `r'^(?P<page>)$'` with no args to views, because reverse don't work
103 custom_page_url(r'^$', cached_2h(views.IndexPage.as_view()), {'page': ''}, name='index'),
104 custom_page_url(r'^(?P<page>robots\.txt)$', RobotsView.as_view()),
105 custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),
106 custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.category_matrix)),
107 custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()),
108 # these pages should show only actual state
109 custom_page_url(r'^shop/(?P<page>order)/$', never_cache(views.OrderPage.as_view())),
110 custom_page_url(r'^shop/(?P<page>order-success)/$', never_cache(views.OrderSuccess.as_view())),
111 ]
112
113 urlpatterns = [
114 url('', include(custom_pages)),
115 url(r'^admin/', include(admin_urls)),
116 url(r'^catalog/', include(catalog_urls)),
117 url(r'^pages/', include('pages.urls')),
118 url(r'^save-feedback/$', views.save_feedback),
119 url(r'^delete-feedback/$', views.delete_feedback),
120 url(r'^set-view-type/$', views.set_view_type, name='set_view_type'),
121 url(r'^shop/', include(ecommerce_urls)),
122 url(r'^search/', include(search_urls)),
123 url(r'^service/', include(service_urls)),
124 url(r'^sitemap\.xml$', cached_60d(sitemap), {'sitemaps': sitemaps}, name='sitemap'),
125 ]
126
127 if settings.DEBUG:
128 import debug_toolbar
129
130 urlpatterns += [
131 url(r'^__debug__/', include(debug_toolbar.urls)),
132 *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),
133 *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),
134 ]
135
[end of shopelectro/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/shopelectro/urls.py b/shopelectro/urls.py
--- a/shopelectro/urls.py
+++ b/shopelectro/urls.py
@@ -1,15 +1,15 @@
-from datetime import timedelta
from collections import OrderedDict
+from datetime import timedelta
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib.sitemaps.views import sitemap
from django.views.decorators.cache import cache_page, never_cache
+from django.views.generic import TemplateView
-from pages.views import RobotsView, SitemapPage
from pages.urls import custom_page_url
-
+from pages.views import RobotsView, SitemapPage
from shopelectro import sitemaps, views
from shopelectro.admin import se_admin
@@ -132,3 +132,11 @@
*static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),
*static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),
]
+
+# Test and Debug environments replace real 404 and 500 error with stack traces.
+# We expose real 404 and 500 pages with separated urls to test them.
+if settings.TEST_ENV or settings.DEBUG:
+ urlpatterns += [
+ url(r'^404/$', TemplateView.as_view(template_name='404.html')),
+ url(r'^500/$', TemplateView.as_view(template_name='500.html')),
+ ]
| {"golden_diff": "diff --git a/shopelectro/urls.py b/shopelectro/urls.py\n--- a/shopelectro/urls.py\n+++ b/shopelectro/urls.py\n@@ -1,15 +1,15 @@\n-from datetime import timedelta\n from collections import OrderedDict\n+from datetime import timedelta\n \n from django.conf import settings\n from django.conf.urls import url, include\n from django.conf.urls.static import static\n from django.contrib.sitemaps.views import sitemap\n from django.views.decorators.cache import cache_page, never_cache\n+from django.views.generic import TemplateView\n \n-from pages.views import RobotsView, SitemapPage\n from pages.urls import custom_page_url\n-\n+from pages.views import RobotsView, SitemapPage\n from shopelectro import sitemaps, views\n from shopelectro.admin import se_admin\n \n@@ -132,3 +132,11 @@\n *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),\n *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),\n ]\n+\n+# Test and Debug environments replace real 404 and 500 error with stack traces.\n+# We expose real 404 and 500 pages with separated urls to test them.\n+if settings.TEST_ENV or settings.DEBUG:\n+ urlpatterns += [\n+ url(r'^404/$', TemplateView.as_view(template_name='404.html')),\n+ url(r'^500/$', TemplateView.as_view(template_name='500.html')),\n+ ]\n", "issue": "Create a custom page for 500 error\nThe page should have an apology and an suggestion to continue the purchase with the help of a consultant\n", "before_files": [{"content": "from datetime import timedelta\nfrom collections import OrderedDict\n\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.views.decorators.cache import cache_page, never_cache\n\nfrom pages.views import RobotsView, SitemapPage\nfrom pages.urls import custom_page_url\n\nfrom shopelectro import sitemaps, views\nfrom shopelectro.admin import se_admin\n\n\ndef cached_time(*args, **kwargs) -> int:\n \"\"\"Return value of time for caching in seconds.\"\"\"\n return int(timedelta(*args, **kwargs).total_seconds())\n\n\n# Orders sitemaps instances\nsitemaps = OrderedDict([\n ('index', sitemaps.IndexSitemap),\n ('category', sitemaps.CategorySitemap),\n ('category-with-tags', sitemaps.CategoryWithTagsSitemap),\n ('products', sitemaps.ProductSitemap),\n ('site', sitemaps.PagesSitemap)\n])\n\n# disable cache\nif settings.DEBUG:\n def cache_page(arg): # Ignore PyFlakesBear\n if callable(arg):\n return arg\n return cache_page\n\ncached_60d = cache_page(cached_time(days=60))\ncached_2h = cache_page(cached_time(hours=2))\n\nadmin_urls = [\n url(r'^', se_admin.urls),\n url(r'^autocomplete/$', views.AdminAutocomplete.as_view(), name='admin_autocomplete'),\n url(r'^get-tree-items/$', views.Tree.as_view()),\n url(r'^redirect-to-product/$', views.RedirectToProduct.as_view()),\n url(r'^table-editor-api/$', views.TableEditorAPI.as_view()),\n url(r'^select2/', include('django_select2.urls')),\n]\n\ncatalog_urls = [\n # \"category\" group\n url(r'^categories/(?P<slug>[\\w-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/tags/(?P<tags>[\\w_-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/$',\n views.CategoryPage.as_view(), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w_-]+)/$',\n views.CategoryPage.as_view(), name='category'),\n # \"load more\" 
group\n url(r'categories/(?P<slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/$',\n views.load_more, name='load_more'),\n url(r'categories/(?P<slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w_-]+)/$',\n views.load_more, name='load_more'),\n # rest of urls\n url(r'^no-images/$', views.ProductsWithoutImages.as_view(),\n name='products_without_images'),\n url(r'^no-text/$', views.ProductsWithoutText.as_view(),\n name='products_without_text'),\n url(r'^products/(?P<product_vendor_code>[0-9]+)/$',\n views.ProductPage.as_view(), name='product'),\n]\n\nservice_urls = [\n url(r'^ya-kassa/aviso/$', views.yandex_aviso, name='yandex_aviso'),\n url(r'^ya-kassa/check/$', views.yandex_check, name='yandex_check'),\n url(r'^ya-feedback/redirect/$',\n views.ya_feedback_with_redirect, name='ya_feedback_with_redirect'),\n url(r'^ya-feedback/request/$',\n views.ya_feedback_request, name='ya_feedback_request'),\n]\n\nsearch_urls = [\n url(r'^autocomplete/$', views.Autocomplete.as_view(), name='autocomplete'),\n]\n\necommerce_urls = [\n url(r'^cart-get/$', never_cache(views.Cart.as_view()), name='cart_get'),\n url(r'^cart-add/$', views.AddToCart.as_view(), name='cart_add'),\n url(r'^cart-change/$', views.ChangeCount.as_view(), name='cart_set_count'),\n url(r'^cart-flush/$', views.FlushCart.as_view(), name='cart_flush'),\n url(r'^cart-remove/$', views.RemoveFromCart.as_view(), name='cart_remove'),\n url(r'^order-call/$', views.order_call),\n url(r'^one-click-buy/$', views.one_click_buy),\n url(r'^yandex-order/$', views.YandexOrder.as_view()),\n url(r'', include('ecommerce.urls')),\n]\n\ncustom_pages = [\n # can't use just `r'^(?P<page>)$'` with no args to views, because reverse don't work\n custom_page_url(r'^$', cached_2h(views.IndexPage.as_view()), {'page': ''}, name='index'),\n custom_page_url(r'^(?P<page>robots\\.txt)$', RobotsView.as_view()),\n custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),\n custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.category_matrix)),\n custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()),\n # these pages should show only actual state\n custom_page_url(r'^shop/(?P<page>order)/$', never_cache(views.OrderPage.as_view())),\n custom_page_url(r'^shop/(?P<page>order-success)/$', never_cache(views.OrderSuccess.as_view())),\n]\n\nurlpatterns = [\n url('', include(custom_pages)),\n url(r'^admin/', include(admin_urls)),\n url(r'^catalog/', include(catalog_urls)),\n url(r'^pages/', include('pages.urls')),\n url(r'^save-feedback/$', views.save_feedback),\n url(r'^delete-feedback/$', views.delete_feedback),\n url(r'^set-view-type/$', views.set_view_type, name='set_view_type'),\n url(r'^shop/', include(ecommerce_urls)),\n url(r'^search/', include(search_urls)),\n url(r'^service/', include(service_urls)),\n url(r'^sitemap\\.xml$', cached_60d(sitemap), {'sitemaps': sitemaps}, name='sitemap'),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns += [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),\n *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),\n ]\n", "path": "shopelectro/urls.py"}]} | 2,296 | 324 |
gh_patches_debug_56718 | rasdani/github-patches | git_diff | mosaicml__composer-293 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ResNet56 default num_classes argument
## 🚀 Feature Request
The `num_classes` argument for [ResNet56_cifar10](https://github.com/mosaicml/composer/blob/main/composer/models/resnet56_cifar10/model.py) should have a default value `num_classes=10`.
## Motivation
It felt silly when writing a demo notebook to have to specify `num_classes=10` when calling `composer.models.CIFAR10_ResNet56(num_classes=10)`. The model has "cifar10" in its name, and even if it didn't, its most common use is for cifar10.
## Implementation
Does it require any changes beyond the `__init__()` signature?
</issue>
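The requested change is essentially a default value on the constructor. A sketch of what the signature would look like, reusing the imports from the module shown below (illustrative, not the final patch):

```python
from typing import List, Optional

from composer.models.base import MosaicClassifier
from composer.models.model_hparams import Initializer
from composer.models.resnets import CIFAR_ResNet


class CIFAR10_ResNet56(MosaicClassifier):
    def __init__(
        self,
        num_classes: int = 10,  # CIFAR-10 has 10 classes, so 10 is a sensible default
        initializers: Optional[List[Initializer]] = None,
    ) -> None:
        initializers = initializers or []
        model = CIFAR_ResNet.get_model_from_name("cifar_resnet_56", initializers, num_classes)
        super().__init__(module=model)


# CIFAR10_ResNet56() would then work without arguments in a demo notebook.
```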
<code>
[start of composer/models/resnet56_cifar10/model.py]
1 # Copyright 2021 MosaicML. All Rights Reserved.
2
3 from typing import List, Optional
4
5 from composer.models.base import MosaicClassifier
6 from composer.models.model_hparams import Initializer
7 from composer.models.resnets import CIFAR_ResNet
8
9
10 class CIFAR10_ResNet56(MosaicClassifier):
11 """A ResNet-56 model extending :class:`MosaicClassifier`.
12
13 See this `paper <https://arxiv.org/abs/1512.03385>`_ for details
14 on the residual network architecture.
15
16 Args:
17 num_classes (int): The number of classes for the model.
18 initializers (List[Initializer], optional): Initializers
19 for the model. ``None`` for no initialization.
20 (default: ``None``)
21 """
22
23 def __init__(
24 self,
25 num_classes: int,
26 initializers: Optional[List[Initializer]] = None,
27 ) -> None:
28 if initializers is None:
29 initializers = []
30
31 model = CIFAR_ResNet.get_model_from_name(
32 "cifar_resnet_56",
33 initializers,
34 num_classes,
35 )
36 super().__init__(module=model)
37
[end of composer/models/resnet56_cifar10/model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/composer/models/resnet56_cifar10/model.py b/composer/models/resnet56_cifar10/model.py
--- a/composer/models/resnet56_cifar10/model.py
+++ b/composer/models/resnet56_cifar10/model.py
@@ -22,7 +22,7 @@
def __init__(
self,
- num_classes: int,
+ num_classes: int = 10,
initializers: Optional[List[Initializer]] = None,
) -> None:
if initializers is None:
| {"golden_diff": "diff --git a/composer/models/resnet56_cifar10/model.py b/composer/models/resnet56_cifar10/model.py\n--- a/composer/models/resnet56_cifar10/model.py\n+++ b/composer/models/resnet56_cifar10/model.py\n@@ -22,7 +22,7 @@\n \n def __init__(\n self,\n- num_classes: int,\n+ num_classes: int = 10,\n initializers: Optional[List[Initializer]] = None,\n ) -> None:\n if initializers is None:\n", "issue": "ResNet56 default num_classes argument\n## \ud83d\ude80 Feature Request\r\nThe `num_classes` argument for [ResNet56_cifar10](https://github.com/mosaicml/composer/blob/main/composer/models/resnet56_cifar10/model.py) should have a default value `num_classes=10`.\r\n\r\n## Motivation\r\n\r\nIt felt silly when writing a demo notebook to have to specify `num_classes=10` when calling `composer.models.CIFAR10_ResNet56(num_classes=10)`. The model has \"cifar10\" in its name, and even if it didn't, it's most common use is for cifar10.\r\n\r\n## Implementation\r\n\r\nDoes it require any changes beyond the `__init__()` signature?\n", "before_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nfrom typing import List, Optional\n\nfrom composer.models.base import MosaicClassifier\nfrom composer.models.model_hparams import Initializer\nfrom composer.models.resnets import CIFAR_ResNet\n\n\nclass CIFAR10_ResNet56(MosaicClassifier):\n \"\"\"A ResNet-56 model extending :class:`MosaicClassifier`.\n\n See this `paper <https://arxiv.org/abs/1512.03385>`_ for details\n on the residual network architecture.\n\n Args:\n num_classes (int): The number of classes for the model.\n initializers (List[Initializer], optional): Initializers\n for the model. ``None`` for no initialization.\n (default: ``None``)\n \"\"\"\n\n def __init__(\n self,\n num_classes: int,\n initializers: Optional[List[Initializer]] = None,\n ) -> None:\n if initializers is None:\n initializers = []\n\n model = CIFAR_ResNet.get_model_from_name(\n \"cifar_resnet_56\",\n initializers,\n num_classes,\n )\n super().__init__(module=model)\n", "path": "composer/models/resnet56_cifar10/model.py"}]} | 1,039 | 127 |
gh_patches_debug_1114 | rasdani/github-patches | git_diff | Pylons__pyramid-2224 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update to Sphinx 1.3.4 when released
There is a [bug in Sphinx 1.3.3 and 1.3.1](https://github.com/sphinx-doc/sphinx/issues/2189) (I haven't tried 1.3.2) where next and previous links in Sphinx documentation are broken when going into children and across sibling directories.
When 1.3.4 is released, we need to pin sphinx to 1.3.4, which will include the commit made 8 days after the 1.3.3 release.
</issue>
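In packaging terms this is a one-line minimum-version bump on the `docs` extra in `setup.py`. A sketch, assuming 1.3.4 is the first release that contains the upstream fix:

```python
docs_extras = [
    'Sphinx >= 1.3.4',  # per the issue, 1.3.1 and 1.3.3 ship broken next/previous links
    'docutils',
    'repoze.sphinx.autointerface',
    'pylons_sphinx_latesturl',
    'pylons-sphinx-themes',
    'sphinxcontrib-programoutput',
]
```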
<code>
[start of setup.py]
1 ##############################################################################
2 #
3 # Copyright (c) 2008-2013 Agendaless Consulting and Contributors.
4 # All Rights Reserved.
5 #
6 # This software is subject to the provisions of the BSD-like license at
7 # http://www.repoze.org/LICENSE.txt. A copy of the license should accompany
8 # this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL
9 # EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,
10 # THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND
11 # FITNESS FOR A PARTICULAR PURPOSE
12 #
13 ##############################################################################
14
15 import os
16 import sys
17
18 from setuptools import setup, find_packages
19
20 py_version = sys.version_info[:2]
21
22 PY3 = py_version[0] == 3
23
24 if PY3:
25 if py_version < (3, 2):
26 raise RuntimeError('On Python 3, Pyramid requires Python 3.2 or better')
27 else:
28 if py_version < (2, 6):
29 raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')
30
31 here = os.path.abspath(os.path.dirname(__file__))
32 try:
33 with open(os.path.join(here, 'README.rst')) as f:
34 README = f.read()
35 with open(os.path.join(here, 'CHANGES.txt')) as f:
36 CHANGES = f.read()
37 except IOError:
38 README = CHANGES = ''
39
40 install_requires=[
41 'setuptools',
42 'WebOb >= 1.3.1', # request.domain and CookieProfile
43 'repoze.lru >= 0.4', # py3 compat
44 'zope.interface >= 3.8.0', # has zope.interface.registry
45 'zope.deprecation >= 3.5.0', # py3 compat
46 'venusian >= 1.0a3', # ``ignore``
47 'translationstring >= 0.4', # py3 compat
48 'PasteDeploy >= 1.5.0', # py3 compat
49 ]
50
51 tests_require = [
52 'WebTest >= 1.3.1', # py3 compat
53 ]
54
55 if not PY3:
56 tests_require.append('zope.component>=3.11.0')
57
58 docs_extras = [
59 'Sphinx >= 1.3.1',
60 'docutils',
61 'repoze.sphinx.autointerface',
62 'pylons_sphinx_latesturl',
63 'pylons-sphinx-themes',
64 'sphinxcontrib-programoutput',
65 ]
66
67 testing_extras = tests_require + [
68 'nose',
69 'coverage',
70 'virtualenv', # for scaffolding tests
71 ]
72
73 setup(name='pyramid',
74 version='1.7.dev0',
75 description='The Pyramid Web Framework, a Pylons project',
76 long_description=README + '\n\n' + CHANGES,
77 classifiers=[
78 "Development Status :: 6 - Mature",
79 "Intended Audience :: Developers",
80 "Programming Language :: Python",
81 "Programming Language :: Python :: 2.6",
82 "Programming Language :: Python :: 2.7",
83 "Programming Language :: Python :: 3",
84 "Programming Language :: Python :: 3.2",
85 "Programming Language :: Python :: 3.3",
86 "Programming Language :: Python :: 3.4",
87 "Programming Language :: Python :: 3.5",
88 "Programming Language :: Python :: Implementation :: CPython",
89 "Programming Language :: Python :: Implementation :: PyPy",
90 "Framework :: Pyramid",
91 "Topic :: Internet :: WWW/HTTP",
92 "Topic :: Internet :: WWW/HTTP :: WSGI",
93 "License :: Repoze Public License",
94 ],
95 keywords='web wsgi pylons pyramid',
96 author="Chris McDonough, Agendaless Consulting",
97 author_email="[email protected]",
98 url="http://docs.pylonsproject.org/en/latest/docs/pyramid.html",
99 license="BSD-derived (http://www.repoze.org/LICENSE.txt)",
100 packages=find_packages(),
101 include_package_data=True,
102 zip_safe=False,
103 install_requires = install_requires,
104 extras_require = {
105 'testing':testing_extras,
106 'docs':docs_extras,
107 },
108 tests_require = tests_require,
109 test_suite="pyramid.tests",
110 entry_points = """\
111 [pyramid.scaffold]
112 starter=pyramid.scaffolds:StarterProjectTemplate
113 zodb=pyramid.scaffolds:ZODBProjectTemplate
114 alchemy=pyramid.scaffolds:AlchemyProjectTemplate
115 [pyramid.pshell_runner]
116 python=pyramid.scripts.pshell:python_shell_runner
117 [console_scripts]
118 pcreate = pyramid.scripts.pcreate:main
119 pserve = pyramid.scripts.pserve:main
120 pshell = pyramid.scripts.pshell:main
121 proutes = pyramid.scripts.proutes:main
122 pviews = pyramid.scripts.pviews:main
123 ptweens = pyramid.scripts.ptweens:main
124 prequest = pyramid.scripts.prequest:main
125 pdistreport = pyramid.scripts.pdistreport:main
126 [paste.server_runner]
127 wsgiref = pyramid.scripts.pserve:wsgiref_server_runner
128 cherrypy = pyramid.scripts.pserve:cherrypy_server_runner
129 """
130 )
131
132
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -56,7 +56,7 @@
tests_require.append('zope.component>=3.11.0')
docs_extras = [
- 'Sphinx >= 1.3.1',
+ 'Sphinx >= 1.3.4',
'docutils',
'repoze.sphinx.autointerface',
'pylons_sphinx_latesturl',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -56,7 +56,7 @@\n tests_require.append('zope.component>=3.11.0')\n \n docs_extras = [\n- 'Sphinx >= 1.3.1',\n+ 'Sphinx >= 1.3.4',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n", "issue": "Update to Sphinx 1.3.4 when released\nThere is a [bug in Sphinx 1.3.3 and 1.3.1](https://github.com/sphinx-doc/sphinx/issues/2189) (I haven't tried 1.3.2) where next and previous links in Sphinx documentation are broken when going into children and across sibling directories.\n\nWhen 1.3.4 is released, we need to pin sphinx to 1.3.4, which will include the commit made 8 days after the 1.3.3 release.\n\n", "before_files": [{"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\npy_version = sys.version_info[:2]\n\nPY3 = py_version[0] == 3\n\nif PY3:\n if py_version < (3, 2):\n raise RuntimeError('On Python 3, Pyramid requires Python 3.2 or better')\nelse:\n if py_version < (2, 6):\n raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires=[\n 'setuptools',\n 'WebOb >= 1.3.1', # request.domain and CookieProfile\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n ]\n\nif not PY3:\n tests_require.append('zope.component>=3.11.0')\n\ndocs_extras = [\n 'Sphinx >= 1.3.1',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-programoutput',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.7.dev0',\n description='The Pyramid Web Framework, a Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Development Status :: 6 - Mature\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n 
\"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"[email protected]\",\n url=\"http://docs.pylonsproject.org/en/latest/docs/pyramid.html\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires = install_requires,\n extras_require = {\n 'testing':testing_extras,\n 'docs':docs_extras,\n },\n tests_require = tests_require,\n test_suite=\"pyramid.tests\",\n entry_points = \"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [pyramid.pshell_runner]\n python=pyramid.scripts.pshell:python_shell_runner\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n\n", "path": "setup.py"}]} | 2,096 | 106 |
gh_patches_debug_26038 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-473 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AW Harburg throws
With 1.28.0, fetching the calendar for aw_harburg_de stopped working.
The last working version was probably around 1.26.x (some updates were skipped).
config:

```yaml
waste_collection_schedule:
  sources:
    - name: aw_harburg_de
      args:
        level_1: "XXX"
        level_2: "YYY"
      customize:
        - type: Hausmüll 14-täglich
          alias: "waste"
          icon: mdi:trash-can
        - type: Gelber Sack
          alias: "recycle"
          icon: mdi:recycle
        - type: Altpapier
          alias: "paper"
          icon: mdi:trash-can-outline
  fetch_time: "04:00"
  day_switch_time: "10:00"
```
level_1 and level_2 are set to real levels according to the website and were not changed.
Please find the log attached:
```
Logger: waste_collection_schedule.scraper
Source: custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py:143
Integration: waste_collection_schedule ([documentation](https://github.com/mampfes/hacs_waste_collection_schedule#readme))
First occurred: 17:00:41 (1 occurrences)
Last logged: 17:00:41
fetch failed for source AW Harburg: Traceback (most recent call last):
  File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py", line 141, in fetch
    entries = self._source.fetch()
  File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py", line 108, in fetch
    dates = self._ics.convert(r.text)
  File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/service/ICS.py", line 38, in convert
    events: List[Any] = icalevents.events(
  File "/usr/local/lib/python3.10/site-packages/icalevents/icalevents.py", line 50, in events
    found_events += parse_events(content, start=start, end=end)
  File "/usr/local/lib/python3.10/site-packages/icalevents/icalparser.py", line 250, in parse_events
    raise ValueError('Content is invalid!')
ValueError: Content is invalid!
```
</issue>
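The traceback bottoms out in `ICS.convert()` raising `ValueError('Content is invalid!')`, meaning one of the downloaded iCal payloads cannot be parsed; around the turn of the year the provider can expose several " als iCal" links, and the calendar for the new year may still be empty. A defensive sketch of the download-and-parse step (the helper name is hypothetical; `Collection`, `ICS` and the headers mirror the module shown below):

```python
import requests

from waste_collection_schedule import Collection  # type: ignore[attr-defined]
from waste_collection_schedule.service.ICS import ICS


def collect_entries(ics: ICS, ical_urls, headers):
    """Parse every iCal link, skipping payloads the ICS parser rejects."""
    entries = []
    for url in ical_urls:
        r = requests.get(url, headers=headers)
        r.raise_for_status()
        try:
            dates = ics.convert(r.text)
        except ValueError:
            continue  # e.g. next year's calendar exists but is still empty
        entries.extend(Collection(d[0], d[1]) for d in dates)
    return entries
```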
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py]
1 import requests
2 from bs4 import BeautifulSoup
3 from waste_collection_schedule import Collection # type: ignore[attr-defined]
4 from waste_collection_schedule.service.ICS import ICS
5
6 TITLE = "AW Harburg"
7 DESCRIPTION = "Abfallwirtschaft Landkreis Harburg"
8 URL = "https://www.landkreis-harburg.de/bauen-umwelt/abfallwirtschaft/abfallkalender/"
9
10 TEST_CASES = {
11 "CityWithTwoLevels": {"level_1": "Hanstedt", "level_2": "Evendorf"},
12 "CityWithThreeLevels": {
13 "level_1": "Buchholz",
14 "level_2": "Buchholz mit Steinbeck (ohne Reindorf)",
15 "level_3": "Seppenser Mühlenweg Haus-Nr. 1 / 2",
16 },
17 }
18
19 HEADERS = {
20 "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64)",
21 }
22
23
24 class Source:
25 def __init__(self, level_1, level_2, level_3=None):
26 self._districts = [level_1, level_2, level_3]
27 self._ics = ICS()
28
29 def fetch(self):
30 # Use a session to keep cookies and stuff
31 session = requests.Session()
32
33 # Get the IDs of the districts on the first level
34 # Double loading is on purpose because sometimes the webpage has an overlay
35 # which is gone on the second try in a session
36 r = session.get(URL, headers=HEADERS)
37 if "Zur aufgerufenen Seite" in r.text:
38 r = session.get(URL, headers=HEADERS)
39 if r.status_code != 200:
40 raise Exception(f"Error: failed to fetch first url: {URL}")
41
42 # Get the IDs of the districts on the first level
43 id = self.parse_level(r.text, 1)
44
45 # Get the IDs of the districts on the second level
46 url = (
47 "https://www.landkreis-harburg.de/ajax/abfall_gebiete_struktur_select.html"
48 )
49 params = {
50 "parent": id,
51 "ebene": 1,
52 "portal": 1,
53 "selected_ebene": 0,
54 }
55 r = session.get(url, params=params, headers=HEADERS)
56 if r.status_code != 200:
57 raise Exception(f"Error: failed to fetch second url: {url}")
58
59 # Get the IDs of the districts on the second level
60 id = self.parse_level(r.text, 2)
61
62 # Get the IDs of the third level - if applicable
63 if self._districts[3 - 1] is not None:
64 # Get the IDs of the districts on the third level
65 params = {
66 "parent": id,
67 "ebene": 2,
68 "portal": 1,
69 "selected_ebene": 0,
70 }
71 r = session.get(url, params=params, headers=HEADERS)
72 if r.status_code != 200:
73 raise Exception(f"Error: failed to fetch third url: {url}")
74
75 # Get the IDs of the districts on the third level
76 id = self.parse_level(r.text, 3)
77
78 # Prepare data for the real web request
79 url = "https://www.landkreis-harburg.de/abfallkalender/abfallkalender_struktur_daten_suche.html"
80 params = {
81 "selected_ebene": id,
82 "owner": 20100,
83 }
84 r = session.get(url, params=params, headers=HEADERS)
85
86 # Sometimes there is no garbage calendar available
87 if "Es sind keine Abfuhrbezirke hinterlegt." in r.text:
88 raise Exception(
89 f'Error: "Es sind keine Abfuhrbezirke hinterlegt." for "{self._districts[3-1]}". Please use different input data.'
90 )
91
92 soup = BeautifulSoup(r.text, features="html.parser")
93 links = soup.find_all("a")
94 ical_url = ""
95 for any_link in links:
96 if " als iCal" in any_link.text:
97 ical_url = any_link.get("href")
98
99 if "ical.html" not in ical_url:
100 raise Exception("No ical Link in the result: " + str(links))
101
102 # Get the final data
103 r = requests.get(ical_url, headers=HEADERS)
104 if not r.ok:
105 raise Exception(f"Error: failed to fetch url: {ical_url}")
106
107 # Parse ics file
108 dates = self._ics.convert(r.text)
109
110 entries = []
111 for d in dates:
112 entries.append(Collection(d[0], d[1]))
113 return entries
114
115 def parse_level(self, response, level):
116 soup = BeautifulSoup(response, features="html.parser")
117 select_content = soup.find_all("select", id=f"strukturEbene{level}")
118 soup = BeautifulSoup(str(select_content), features="html.parser")
119 options_content = soup.find_all("option")
120 level_ids = {}
121 for option in options_content:
122 # Ignore the "Bitte wählen..."
123 if option.get("value") != "0":
124 level_ids[option.text] = option.get("value")
125
126 if level_ids == {}:
127 raise Exception(f"Error: Level {level} Dictionary empty")
128
129 if self._districts[level - 1] not in level_ids:
130 raise Exception(
131 f"Error: District {self._districts[level]} is not in the dictionary: {level_ids}"
132 )
133
134 return level_ids[self._districts[level - 1]]
135
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py
@@ -91,25 +91,26 @@
soup = BeautifulSoup(r.text, features="html.parser")
links = soup.find_all("a")
- ical_url = ""
+ ical_urls = []
for any_link in links:
if " als iCal" in any_link.text:
- ical_url = any_link.get("href")
-
- if "ical.html" not in ical_url:
- raise Exception("No ical Link in the result: " + str(links))
-
- # Get the final data
- r = requests.get(ical_url, headers=HEADERS)
- if not r.ok:
- raise Exception(f"Error: failed to fetch url: {ical_url}")
-
- # Parse ics file
- dates = self._ics.convert(r.text)
+ # multiple links occur during year transition
+ ical_urls.append(any_link.get("href"))
+ # Get the final data for all links
entries = []
- for d in dates:
- entries.append(Collection(d[0], d[1]))
+ for ical_url in ical_urls:
+ r = requests.get(ical_url, headers=HEADERS)
+ r.raise_for_status()
+
+ # Parse ics file
+ try:
+ dates = self._ics.convert(r.text)
+
+ for d in dates:
+ entries.append(Collection(d[0], d[1]))
+ except ValueError:
+ pass # during year transition the ical for the next year may be empty
return entries
def parse_level(self, response, level):
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py\n@@ -91,25 +91,26 @@\n \n soup = BeautifulSoup(r.text, features=\"html.parser\")\n links = soup.find_all(\"a\")\n- ical_url = \"\"\n+ ical_urls = []\n for any_link in links:\n if \" als iCal\" in any_link.text:\n- ical_url = any_link.get(\"href\")\n-\n- if \"ical.html\" not in ical_url:\n- raise Exception(\"No ical Link in the result: \" + str(links))\n-\n- # Get the final data\n- r = requests.get(ical_url, headers=HEADERS)\n- if not r.ok:\n- raise Exception(f\"Error: failed to fetch url: {ical_url}\")\n-\n- # Parse ics file\n- dates = self._ics.convert(r.text)\n+ # multiple links occur during year transition\n+ ical_urls.append(any_link.get(\"href\"))\n \n+ # Get the final data for all links\n entries = []\n- for d in dates:\n- entries.append(Collection(d[0], d[1]))\n+ for ical_url in ical_urls:\n+ r = requests.get(ical_url, headers=HEADERS)\n+ r.raise_for_status()\n+\n+ # Parse ics file\n+ try:\n+ dates = self._ics.convert(r.text)\n+\n+ for d in dates:\n+ entries.append(Collection(d[0], d[1]))\n+ except ValueError:\n+ pass # during year transition the ical for the next year may be empty\n return entries\n \n def parse_level(self, response, level):\n", "issue": "AW Harburg throws\nWith 1.28.0 fetching the calendar for aw_harburg_de stopped working.\r\nLast working version probably was 1.26.x +- ... (skipped some updates)\r\nconfig:```\r\n```\r\n\r\nwaste_collection_schedule:\r\n sources:\r\n - name: aw_harburg_de\r\n args:\r\n level_1: \"XXX\"\r\n level_2: \"YYY\"\r\n customize:\r\n - type: Hausm\u00fcll 14-t\u00e4glich\r\n alias: \"waste\"\r\n icon: mdi:trash-can\r\n - type: Gelber Sack\r\n alias: \"recycle\"\r\n icon: mdi:recycle\r\n - type: Altpapier\r\n alias: \"paper\"\r\n icon: mdi:trash-can-outline\r\n fetch_time: \"04:00\"\r\n day_switch_time: \"10:00\"\r\n```\r\nlevel_1 + 2 are set to real levels according to website and were not changed\r\nPls find log attached\r\n```\r\n\r\nLogger: waste_collection_schedule.scraper\r\nSource: custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py:143\r\nIntegration: waste_collection_schedule ([documentation](https://github.com/mampfes/hacs_waste_collection_schedule#readme))\r\nFirst occurred: 17:00:41 (1 occurrences)\r\nLast logged: 17:00:41\r\nfetch failed for source AW Harburg: Traceback (most recent call last): File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py\", line 141, in fetch entries = self._source.fetch() File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py\", line 108, in fetch dates = self._ics.convert(r.text) File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/service/ICS.py\", line 38, in convert events: List[Any] = icalevents.events( File \"/usr/local/lib/python3.10/site-packages/icalevents/icalevents.py\", line 50, in events found_events += parse_events(content, start=start, end=end) File \"/usr/local/lib/python3.10/site-packages/icalevents/icalparser.py\", line 250, in parse_events raise ValueError('Content is invalid!') ValueError: Content is 
invalid!\r\n```\n", "before_files": [{"content": "import requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"AW Harburg\"\nDESCRIPTION = \"Abfallwirtschaft Landkreis Harburg\"\nURL = \"https://www.landkreis-harburg.de/bauen-umwelt/abfallwirtschaft/abfallkalender/\"\n\nTEST_CASES = {\n \"CityWithTwoLevels\": {\"level_1\": \"Hanstedt\", \"level_2\": \"Evendorf\"},\n \"CityWithThreeLevels\": {\n \"level_1\": \"Buchholz\",\n \"level_2\": \"Buchholz mit Steinbeck (ohne Reindorf)\",\n \"level_3\": \"Seppenser M\u00fchlenweg Haus-Nr. 1 / 2\",\n },\n}\n\nHEADERS = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64)\",\n}\n\n\nclass Source:\n def __init__(self, level_1, level_2, level_3=None):\n self._districts = [level_1, level_2, level_3]\n self._ics = ICS()\n\n def fetch(self):\n # Use a session to keep cookies and stuff\n session = requests.Session()\n\n # Get the IDs of the districts on the first level\n # Double loading is on purpose because sometimes the webpage has an overlay\n # which is gone on the second try in a session\n r = session.get(URL, headers=HEADERS)\n if \"Zur aufgerufenen Seite\" in r.text:\n r = session.get(URL, headers=HEADERS)\n if r.status_code != 200:\n raise Exception(f\"Error: failed to fetch first url: {URL}\")\n\n # Get the IDs of the districts on the first level\n id = self.parse_level(r.text, 1)\n\n # Get the IDs of the districts on the second level\n url = (\n \"https://www.landkreis-harburg.de/ajax/abfall_gebiete_struktur_select.html\"\n )\n params = {\n \"parent\": id,\n \"ebene\": 1,\n \"portal\": 1,\n \"selected_ebene\": 0,\n }\n r = session.get(url, params=params, headers=HEADERS)\n if r.status_code != 200:\n raise Exception(f\"Error: failed to fetch second url: {url}\")\n\n # Get the IDs of the districts on the second level\n id = self.parse_level(r.text, 2)\n\n # Get the IDs of the third level - if applicable\n if self._districts[3 - 1] is not None:\n # Get the IDs of the districts on the third level\n params = {\n \"parent\": id,\n \"ebene\": 2,\n \"portal\": 1,\n \"selected_ebene\": 0,\n }\n r = session.get(url, params=params, headers=HEADERS)\n if r.status_code != 200:\n raise Exception(f\"Error: failed to fetch third url: {url}\")\n\n # Get the IDs of the districts on the third level\n id = self.parse_level(r.text, 3)\n\n # Prepare data for the real web request\n url = \"https://www.landkreis-harburg.de/abfallkalender/abfallkalender_struktur_daten_suche.html\"\n params = {\n \"selected_ebene\": id,\n \"owner\": 20100,\n }\n r = session.get(url, params=params, headers=HEADERS)\n\n # Sometimes there is no garbage calendar available\n if \"Es sind keine Abfuhrbezirke hinterlegt.\" in r.text:\n raise Exception(\n f'Error: \"Es sind keine Abfuhrbezirke hinterlegt.\" for \"{self._districts[3-1]}\". 
Please use different input data.'\n )\n\n soup = BeautifulSoup(r.text, features=\"html.parser\")\n links = soup.find_all(\"a\")\n ical_url = \"\"\n for any_link in links:\n if \" als iCal\" in any_link.text:\n ical_url = any_link.get(\"href\")\n\n if \"ical.html\" not in ical_url:\n raise Exception(\"No ical Link in the result: \" + str(links))\n\n # Get the final data\n r = requests.get(ical_url, headers=HEADERS)\n if not r.ok:\n raise Exception(f\"Error: failed to fetch url: {ical_url}\")\n\n # Parse ics file\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n\n def parse_level(self, response, level):\n soup = BeautifulSoup(response, features=\"html.parser\")\n select_content = soup.find_all(\"select\", id=f\"strukturEbene{level}\")\n soup = BeautifulSoup(str(select_content), features=\"html.parser\")\n options_content = soup.find_all(\"option\")\n level_ids = {}\n for option in options_content:\n # Ignore the \"Bitte w\u00e4hlen...\"\n if option.get(\"value\") != \"0\":\n level_ids[option.text] = option.get(\"value\")\n\n if level_ids == {}:\n raise Exception(f\"Error: Level {level} Dictionary empty\")\n\n if self._districts[level - 1] not in level_ids:\n raise Exception(\n f\"Error: District {self._districts[level]} is not in the dictionary: {level_ids}\"\n )\n\n return level_ids[self._districts[level - 1]]\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/aw_harburg_de.py"}]} | 2,620 | 439 |
gh_patches_debug_25116 | rasdani/github-patches | git_diff | lutris__lutris-2682 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Failure to read Steam's config.vdf due to wrong case
Lutris can't read Steam's config.vdf file because the "Steam" value is actually lowercase when Lutris expects it to be uppercase.

Same as #1966.
</issue>
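Because Valve writes the nesting keys in `config.vdf` with inconsistent casing (`Steam`/`steam`, `Valve`/`valve`), a robust approach is to walk the parsed VDF dictionary case-insensitively instead of hard-coding each spelling. A self-contained sketch (the helper name is hypothetical; the VDF parsing itself comes from `lutris.util.steam.vdf`):

```python
def get_entry_case_insensitive(config_dict, path):
    """Follow `path` through nested dicts, matching each key case-insensitively."""
    for key, value in config_dict.items():
        if key.lower() == path[0].lower():
            if len(path) == 1:
                return value
            return get_entry_case_insensitive(value, path[1:])
    raise KeyError(path[0])


# Usage with the structure Lutris expects from Steam's config.vdf:
# steam_section = get_entry_case_insensitive(
#     config, ["InstallConfigStore", "Software", "Valve", "Steam"]
# )
```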
<code>
[start of lutris/util/steam/config.py]
1 """Handle Steam configuration"""
2 import os
3 from collections import OrderedDict, defaultdict
4
5 from lutris.util import system
6 from lutris.util.log import logger
7 from lutris.util.steam.vdf import vdf_parse
8
9
10 def get_default_acf(appid, name):
11 """Return a default configuration usable to
12 create a runnable game in Steam"""
13
14 userconfig = OrderedDict()
15 userconfig["name"] = name
16 userconfig["gameid"] = appid
17
18 appstate = OrderedDict()
19 appstate["appID"] = appid
20 appstate["Universe"] = "1"
21 appstate["StateFlags"] = "1026"
22 appstate["installdir"] = name
23 appstate["UserConfig"] = userconfig
24 return {"AppState": appstate}
25
26
27 def read_config(steam_data_dir):
28 """Read the Steam configuration and return it as an object"""
29 config_filename = os.path.join(steam_data_dir, "config/config.vdf")
30 if not system.path_exists(config_filename):
31 return None
32 with open(config_filename, "r") as steam_config_file:
33 config = vdf_parse(steam_config_file, {})
34 try:
35 return config["InstallConfigStore"]["Software"]["Valve"]["Steam"]
36 except KeyError:
37 try:
38 return config["InstallConfigStore"]["Software"]["valve"]["Steam"]
39 except KeyError as ex:
40 logger.error("Steam config %s is empty: %s", config_filename, ex)
41
42
43 def get_steamapps_paths_for_platform(platform_name):
44 """
45 """
46 from lutris.runners import winesteam, steam
47
48 runners = {"linux": steam.steam, "windows": winesteam.winesteam}
49 runner = runners[platform_name]()
50 return runner.get_steamapps_dirs()
51
52
53 def get_steamapps_paths(flat=False, platform=None):
54 base_platforms = ["linux", "windows"]
55 if flat:
56 steamapps_paths = []
57 else:
58 steamapps_paths = defaultdict(list)
59
60 if platform:
61 if platform not in base_platforms:
62 raise ValueError("Illegal value for Steam platform: %s" % platform)
63 platforms = [platform]
64 else:
65 platforms = base_platforms
66
67 for _platform in platforms:
68 folders = get_steamapps_paths_for_platform(_platform)
69 if flat:
70 steamapps_paths += folders
71 else:
72 steamapps_paths[_platform] = folders
73
74 return steamapps_paths
75
[end of lutris/util/steam/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lutris/util/steam/config.py b/lutris/util/steam/config.py
--- a/lutris/util/steam/config.py
+++ b/lutris/util/steam/config.py
@@ -26,18 +26,25 @@
def read_config(steam_data_dir):
"""Read the Steam configuration and return it as an object"""
+
+ def get_entry_case_insensitive(config_dict, path):
+ for key, value in config_dict.items():
+ if key.lower() == path[0].lower():
+ if len(path) <= 1:
+ return config_dict[key]
+
+ return get_entry_case_insensitive(config_dict[key], path[1:])
+ raise KeyError(path[0])
+
config_filename = os.path.join(steam_data_dir, "config/config.vdf")
if not system.path_exists(config_filename):
return None
with open(config_filename, "r") as steam_config_file:
config = vdf_parse(steam_config_file, {})
try:
- return config["InstallConfigStore"]["Software"]["Valve"]["Steam"]
- except KeyError:
- try:
- return config["InstallConfigStore"]["Software"]["valve"]["Steam"]
- except KeyError as ex:
- logger.error("Steam config %s is empty: %s", config_filename, ex)
+ return get_entry_case_insensitive(config, ["InstallConfigStore", "Software", "Valve", "Steam"])
+ except KeyError as ex:
+ logger.error("Steam config %s is empty: %s", config_filename, ex)
def get_steamapps_paths_for_platform(platform_name):
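As a rough usage sketch of the get_entry_case_insensitive helper introduced above (the sample dictionary is hypothetical, not taken from a real config.vdf):

    parsed = {"InstallConfigStore": {"Software": {"valve": {"steam": {"AutoLogin": "1"}}}}}
    steam_section = get_entry_case_insensitive(parsed, ["InstallConfigStore", "Software", "Valve", "Steam"])
    # steam_section == {"AutoLogin": "1"}, regardless of how the file capitalizes "Valve" or "Steam"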
| {"golden_diff": "diff --git a/lutris/util/steam/config.py b/lutris/util/steam/config.py\n--- a/lutris/util/steam/config.py\n+++ b/lutris/util/steam/config.py\n@@ -26,18 +26,25 @@\n \n def read_config(steam_data_dir):\n \"\"\"Read the Steam configuration and return it as an object\"\"\"\n+\n+ def get_entry_case_insensitive(config_dict, path):\n+ for key, value in config_dict.items():\n+ if key.lower() == path[0].lower():\n+ if len(path) <= 1:\n+ return config_dict[key]\n+\n+ return get_entry_case_insensitive(config_dict[key], path[1:])\n+ raise KeyError(path[0])\n+\n config_filename = os.path.join(steam_data_dir, \"config/config.vdf\")\n if not system.path_exists(config_filename):\n return None\n with open(config_filename, \"r\") as steam_config_file:\n config = vdf_parse(steam_config_file, {})\n try:\n- return config[\"InstallConfigStore\"][\"Software\"][\"Valve\"][\"Steam\"]\n- except KeyError:\n- try:\n- return config[\"InstallConfigStore\"][\"Software\"][\"valve\"][\"Steam\"]\n- except KeyError as ex:\n- logger.error(\"Steam config %s is empty: %s\", config_filename, ex)\n+ return get_entry_case_insensitive(config, [\"InstallConfigStore\", \"Software\", \"Valve\", \"Steam\"])\n+ except KeyError as ex:\n+ logger.error(\"Steam config %s is empty: %s\", config_filename, ex)\n \n \n def get_steamapps_paths_for_platform(platform_name):\n", "issue": "Failure to read Steam's config.vdf due to wrong case\nLutris can't read Steam's config.vdf file because the \"Steam\" value is actually lowercase when Lutris expects it to be uppercase.\r\n\r\n\r\n\r\nSame as #1966.\nFailure to read Steam's config.vdf due to wrong case\nLutris can't read Steam's config.vdf file because the \"Steam\" value is actually lowercase when Lutris expects it to be uppercase.\r\n\r\n\r\n\r\nSame as #1966.\n", "before_files": [{"content": "\"\"\"Handle Steam configuration\"\"\"\nimport os\nfrom collections import OrderedDict, defaultdict\n\nfrom lutris.util import system\nfrom lutris.util.log import logger\nfrom lutris.util.steam.vdf import vdf_parse\n\n\ndef get_default_acf(appid, name):\n \"\"\"Return a default configuration usable to\n create a runnable game in Steam\"\"\"\n\n userconfig = OrderedDict()\n userconfig[\"name\"] = name\n userconfig[\"gameid\"] = appid\n\n appstate = OrderedDict()\n appstate[\"appID\"] = appid\n appstate[\"Universe\"] = \"1\"\n appstate[\"StateFlags\"] = \"1026\"\n appstate[\"installdir\"] = name\n appstate[\"UserConfig\"] = userconfig\n return {\"AppState\": appstate}\n\n\ndef read_config(steam_data_dir):\n \"\"\"Read the Steam configuration and return it as an object\"\"\"\n config_filename = os.path.join(steam_data_dir, \"config/config.vdf\")\n if not system.path_exists(config_filename):\n return None\n with open(config_filename, \"r\") as steam_config_file:\n config = vdf_parse(steam_config_file, {})\n try:\n return config[\"InstallConfigStore\"][\"Software\"][\"Valve\"][\"Steam\"]\n except KeyError:\n try:\n return config[\"InstallConfigStore\"][\"Software\"][\"valve\"][\"Steam\"]\n except KeyError as ex:\n logger.error(\"Steam config %s is empty: %s\", config_filename, ex)\n\n\ndef get_steamapps_paths_for_platform(platform_name):\n \"\"\"\n \"\"\"\n from lutris.runners import winesteam, steam\n\n runners = {\"linux\": steam.steam, \"windows\": winesteam.winesteam}\n runner = runners[platform_name]()\n return runner.get_steamapps_dirs()\n\n\ndef get_steamapps_paths(flat=False, platform=None):\n base_platforms = [\"linux\", \"windows\"]\n if flat:\n steamapps_paths = []\n else:\n 
steamapps_paths = defaultdict(list)\n\n if platform:\n if platform not in base_platforms:\n raise ValueError(\"Illegal value for Steam platform: %s\" % platform)\n platforms = [platform]\n else:\n platforms = base_platforms\n\n for _platform in platforms:\n folders = get_steamapps_paths_for_platform(_platform)\n if flat:\n steamapps_paths += folders\n else:\n steamapps_paths[_platform] = folders\n\n return steamapps_paths\n", "path": "lutris/util/steam/config.py"}]} | 1,449 | 353 |