problem_id (string, 18-22 chars) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, 13-58 chars) | prompt (string, 1.71k-18.9k chars) | golden_diff (string, 145-5.13k chars) | verification_info (string, 465-23.6k chars) | num_tokens_prompt (int64, 556-4.1k) | num_tokens_diff (int64, 47-1.02k)
---|---|---|---|---|---|---|---|---|
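
Each row in the table below is one patch-debugging example: an issue statement plus a partial code base in `prompt`, the reference patch in `golden_diff`, and a `verification_info` payload. The sketch below is a minimal, unofficial way to load and inspect a record programmatically. It assumes the dataset is loadable from the Hugging Face Hub under the ID `rasdani/github-patches` (taken from the `source` column), that a `train` split exists, and that `verification_info` is a JSON-encoded string; none of these are confirmed by this preview.

```python
# Minimal sketch for inspecting one record.
# Assumptions (not confirmed by this preview): the Hub ID "rasdani/github-patches"
# is loadable with datasets.load_dataset, a "train" split exists, and
# verification_info is a JSON-encoded string.
import json

from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]

print(row["problem_id"])       # e.g. "gh_patches_debug_10923"
print(row["in_source_id"])     # e.g. "napari__napari-2930"
print(row["num_tokens_prompt"], row["num_tokens_diff"])

# The prompt holds the issue text plus the partial code base; the golden diff
# is the reference patch the example expects.
print(row["prompt"][:200])
print(row["golden_diff"][:200])

# verification_info appears to bundle the golden diff, the issue text, and the
# pre-patch file contents ("before_files") as one JSON object.
info = json.loads(row["verification_info"])
print(sorted(info.keys()))
```

If the data is instead kept as local files (for example JSON Lines), `load_dataset("json", data_files=...)` would be the equivalent entry point.
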
gh_patches_debug_10923
|
rasdani/github-patches
|
git_diff
|
napari__napari-2930
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SettingsManager._load not working with Field(default_factory=...)
## 🐛 Bug
Ran into a [test failure](https://github.com/napari/napari/pull/2695/checks?check_run_id=2925027700#step:7:250) on #2695 that looks to be a bug in the Settings Manager.
The preferred way to add a default mutable container to a pydantic model is to use `default_factor`, but currently, if you do this, you'll get an error:
```python
class PluginsSettings(BaseNapariSettings):
# this works, and is used in a couple places in the settings
# but is not how an empty mutable object should be declared
some_setting: Dict[str, str] = Field(dict())
# this is the preferred way to create an empty mutable
# but it doesn't work:
some_other_setting: Dict[str, str] = Field(default_factory=dict)
```
traceback:
```pytb
napari/utils/settings/_manager.py:262: in get_settings
SETTINGS = SettingsManager(config_path=config_path)
napari/utils/settings/_manager.py:90: in __init__
self._load()
napari/utils/settings/_manager.py:186: in _load
self._defaults[section] = setting(**_section_defaults)
pydantic/env_settings.py:36: in pydantic.env_settings.BaseSettings.__init__
???
napari/utils/events/evented_model.py:129: in __init__
super().__init__(**kwargs)
pydantic/main.py:406: in pydantic.main.BaseModel.__init__
???
E pydantic.error_wrappers.ValidationError: 2 validation errors for PluginsSettings
E extension2reader
E none is not an allowed value (type=type_error.none.not_allowed)
E extension2writer
E none is not an allowed value (type=type_error.none.not_allowed)
```
</issue>
<code>
[start of napari/utils/settings/_manager.py]
1 """Settings management.
2 """
3
4 import json
5 import os
6 import warnings
7 from pathlib import Path
8 from typing import Any, Dict, List, Optional, Union
9
10 from appdirs import user_config_dir
11 from yaml import safe_dump, safe_load
12
13 from ...utils.translations import trans
14 from .._base import _APPAUTHOR, _APPNAME, _FILENAME
15 from ._defaults import CORE_SETTINGS as CORE_SETTINGS
16 from ._defaults import (
17 AppearanceSettings,
18 ApplicationSettings,
19 BaseNapariSettings,
20 ExperimentalSettings,
21 PluginsSettings,
22 ShortcutsSettings,
23 )
24
25
26 class _SettingsMixin:
27 appearance: AppearanceSettings
28 application: ApplicationSettings
29 plugins: PluginsSettings
30 shortcuts: ShortcutsSettings
31 experimental: ExperimentalSettings
32
33
34 class SettingsManager(_SettingsMixin):
35 """
36 Napari settings manager using evented SettingsModels.
37
38 This provides the presistence layer for the application settings.
39
40 Parameters
41 ----------
42 config_path : str, optional
43 Provide the base folder to store napari configuration. Default is None,
44 which will point to user config provided by `appdirs`.
45 save_to_disk : bool, optional
46 Persist settings on disk. Default is True.
47
48 Notes
49 -----
50 The settings manager will create a new user configuration folder which is
51 provided by `appdirs` in a cross platform manner. On the first startup a
52 new configuration file will be created using the default values defined by
53 the `CORE_SETTINGS` models.
54
55 If a configuration file is found in the specified location, it will be
56 loaded by the `_load` method. On configuration load the following checks
57 are performed:
58
59 - If invalid sections are found, these will be removed from the file.
60 - If invalid keys are found within a valid section, these will be removed
61 from the file.
62 - If invalid values are found within valid sections and valid keys, these
63 will be replaced by the default value provided by `CORE_SETTINGS`
64 models.
65 """
66
67 _FILENAME = _FILENAME
68 _APPNAME = _APPNAME
69 _APPAUTHOR = _APPAUTHOR
70
71 def __init__(
72 self,
73 config_path: Optional[Path] = None,
74 save_to_disk: bool = True,
75 ):
76 self._config_path = (
77 Path(user_config_dir(self._APPNAME, self._APPAUTHOR))
78 if config_path is None
79 else Path(config_path)
80 )
81 self._save_to_disk = save_to_disk
82 self._settings: Dict[str, BaseNapariSettings] = {}
83 self._defaults: Dict[str, BaseNapariSettings] = {}
84 self._plugins: List[str] = []
85 self._env_settings: Dict[str, Any] = {}
86
87 if not self._config_path.is_dir():
88 os.makedirs(self._config_path)
89
90 self._load()
91
92 def __getattr__(self, attr):
93 if attr in self._settings:
94 return self._settings[attr]
95
96 def __dir__(self):
97 """Add setting keys to make tab completion works."""
98 return list(super().__dir__()) + list(self._settings)
99
100 def __str__(self):
101 return safe_dump(self._to_dict(safe=True))
102
103 def _remove_default(self, settings_data):
104 """
105 Attempt to convert self to dict and to remove any default values from the configuration
106 """
107 for section, values in settings_data.items():
108 if section not in self._defaults:
109 continue
110
111 default_values = self._defaults[section].dict()
112 for k, v in list(values.items()):
113 if default_values.get(k, None) == v:
114 del values[k]
115
116 return settings_data
117
118 def _to_dict(self, safe: bool = False) -> dict:
119 """Convert the settings to a dictionary."""
120 data = {}
121 for section, model in self._settings.items():
122 if safe:
123 # We roundtrip to keep string objects (like SchemaVersion)
124 # yaml representable
125 data[section] = json.loads(model.json())
126 else:
127 data[section] = model.dict()
128
129 return data
130
131 def _save(self):
132 """Save configuration to disk."""
133 if self._save_to_disk:
134 path = self.path / self._FILENAME
135
136 if self._env_settings:
137 # If using environment variables do not save them in the
138 # `settings.yaml` file. We just delete any keys loaded
139 # as environment variables
140 data = self._to_dict(safe=True)
141 for section, env_data in self._env_settings.items():
142 for k, v in env_data.items():
143 del data[section][k]
144 else:
145 data = self._to_dict(safe=True)
146
147 with open(path, "w") as fh:
148 fh.write(safe_dump(self._remove_default(data)))
149
150 def _load(self):
151 """Read configuration from disk."""
152 path = self.path / self._FILENAME
153
154 if path.is_file():
155 try:
156 with open(path) as fh:
157 data = safe_load(fh.read()) or {}
158 except Exception as err:
159 warnings.warn(
160 trans._(
161 "The content of the napari settings file could not be read\n\nThe default settings will be used and the content of the file will be replaced the next time settings are changed.\n\nError:\n{err}",
162 deferred=True,
163 err=err,
164 )
165 )
166 data = {}
167
168 # Load data once and save it in the base class
169 BaseNapariSettings._LOADED_DATA = data
170
171 for setting in CORE_SETTINGS:
172 section = setting.schema().get("section", None)
173 if section is None:
174 raise ValueError(
175 trans._(
176 "Settings model {setting!r} must provide a `section` in the `schemas_extra`",
177 deferred=True,
178 setting=setting,
179 )
180 )
181
182 _section_defaults = {}
183 for option, option_data in setting.schema()["properties"].items():
184 _section_defaults[option] = option_data.get("default", None)
185
186 self._defaults[section] = setting(**_section_defaults)
187 model = setting()
188 model.events.connect(lambda x: self._save())
189 self._settings[section] = model
190 self._env_settings[section] = getattr(
191 model.__config__, "_env_settings"
192 )(model)
193
194 self._save()
195
196 @property
197 def path(self):
198 return self._config_path
199
200 def reset(self):
201 """Reset settings to default values."""
202 for section in self._settings:
203 for key, default_value in self._defaults[section].dict().items():
204 setattr(self._settings[section], key, default_value)
205
206 self._save()
207
208 def schemas(self) -> dict:
209 """Return the json schema for each of the settings model."""
210 schemas = {}
211 for section, settings in self._settings.items():
212 schemas[section] = {
213 "json_schema": settings.schema_json(),
214 "model": settings,
215 }
216
217 return schemas
218
219 def register_plugin(self, plugin):
220 """Register plugin settings with the settings manager.
221
222 Parameters
223 ----------
224 plugin
225 The napari plugin that may or may not provide settings.
226 """
227 self._plugins.append(plugin)
228
229
230 class _SettingsProxy(_SettingsMixin):
231 """Backwards compatibility layer."""
232
233 def __getattribute__(self, name) -> Any:
234 return getattr(get_settings(), name)
235
236
237 SETTINGS: Union[SettingsManager, _SettingsProxy] = _SettingsProxy()
238
239
240 def get_settings(path: Optional[Union[Path, str]] = None) -> SettingsManager:
241 """
242 Get settings for a given path.
243
244 Parameters
245 ----------
246 path: Path, optional
247 The path to read/write the settings from.
248
249 Returns
250 -------
251 SettingsManager
252 The settings manager.
253
254 Notes
255 -----
256 The path can only be set once per session.
257 """
258 global SETTINGS
259
260 if isinstance(SETTINGS, _SettingsProxy):
261 config_path = Path(path).resolve() if path else None
262 SETTINGS = SettingsManager(config_path=config_path)
263 elif path is not None:
264 import inspect
265
266 curframe = inspect.currentframe()
267 calframe = inspect.getouterframes(curframe, 2)
268 raise Exception(
269 trans._(
270 "The path can only be set once per session. Settings called from {calframe[1][3]}",
271 deferred=True,
272 calframe=calframe,
273 )
274 )
275
276 return SETTINGS
277
[end of napari/utils/settings/_manager.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/napari/utils/settings/_manager.py b/napari/utils/settings/_manager.py
--- a/napari/utils/settings/_manager.py
+++ b/napari/utils/settings/_manager.py
@@ -179,11 +179,9 @@
)
)
- _section_defaults = {}
- for option, option_data in setting.schema()["properties"].items():
- _section_defaults[option] = option_data.get("default", None)
-
- self._defaults[section] = setting(**_section_defaults)
+ self._defaults[section] = setting(
+ **{k: v.get_default() for k, v in setting.__fields__.items()}
+ )
model = setting()
model.events.connect(lambda x: self._save())
self._settings[section] = model
|
{"golden_diff": "diff --git a/napari/utils/settings/_manager.py b/napari/utils/settings/_manager.py\n--- a/napari/utils/settings/_manager.py\n+++ b/napari/utils/settings/_manager.py\n@@ -179,11 +179,9 @@\n )\n )\n \n- _section_defaults = {}\n- for option, option_data in setting.schema()[\"properties\"].items():\n- _section_defaults[option] = option_data.get(\"default\", None)\n-\n- self._defaults[section] = setting(**_section_defaults)\n+ self._defaults[section] = setting(\n+ **{k: v.get_default() for k, v in setting.__fields__.items()}\n+ )\n model = setting()\n model.events.connect(lambda x: self._save())\n self._settings[section] = model\n", "issue": "SettingsManager._load not working with Field(default_factory=...)\n## \ud83d\udc1b Bug\r\nRan into a [test failure](https://github.com/napari/napari/pull/2695/checks?check_run_id=2925027700#step:7:250) on #2695 that looks to be a bug in the Settings Manager.\r\n\r\nThe preferred way to add a default mutable container to a pydantic model is to use `default_factor`, but currently, if you do this, you'll get an error:\r\n\r\n```python\r\nclass PluginsSettings(BaseNapariSettings):\r\n # this works, and is used in a couple places in the settings\r\n # but is not how an empty mutable object should be declared\r\n some_setting: Dict[str, str] = Field(dict())\r\n\r\n # this is the preferred way to create an empty mutable\r\n # but it doesn't work:\r\n some_other_setting: Dict[str, str] = Field(default_factory=dict)\r\n```\r\n\r\ntraceback:\r\n\r\n```pytb\r\nnapari/utils/settings/_manager.py:262: in get_settings\r\n SETTINGS = SettingsManager(config_path=config_path)\r\nnapari/utils/settings/_manager.py:90: in __init__\r\n self._load()\r\nnapari/utils/settings/_manager.py:186: in _load\r\n self._defaults[section] = setting(**_section_defaults)\r\npydantic/env_settings.py:36: in pydantic.env_settings.BaseSettings.__init__\r\n ???\r\nnapari/utils/events/evented_model.py:129: in __init__\r\n super().__init__(**kwargs)\r\npydantic/main.py:406: in pydantic.main.BaseModel.__init__\r\n ???\r\nE pydantic.error_wrappers.ValidationError: 2 validation errors for PluginsSettings\r\nE extension2reader\r\nE none is not an allowed value (type=type_error.none.not_allowed)\r\nE extension2writer\r\nE none is not an allowed value (type=type_error.none.not_allowed)\r\n\r\n```\n", "before_files": [{"content": "\"\"\"Settings management.\n\"\"\"\n\nimport json\nimport os\nimport warnings\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom appdirs import user_config_dir\nfrom yaml import safe_dump, safe_load\n\nfrom ...utils.translations import trans\nfrom .._base import _APPAUTHOR, _APPNAME, _FILENAME\nfrom ._defaults import CORE_SETTINGS as CORE_SETTINGS\nfrom ._defaults import (\n AppearanceSettings,\n ApplicationSettings,\n BaseNapariSettings,\n ExperimentalSettings,\n PluginsSettings,\n ShortcutsSettings,\n)\n\n\nclass _SettingsMixin:\n appearance: AppearanceSettings\n application: ApplicationSettings\n plugins: PluginsSettings\n shortcuts: ShortcutsSettings\n experimental: ExperimentalSettings\n\n\nclass SettingsManager(_SettingsMixin):\n \"\"\"\n Napari settings manager using evented SettingsModels.\n\n This provides the presistence layer for the application settings.\n\n Parameters\n ----------\n config_path : str, optional\n Provide the base folder to store napari configuration. Default is None,\n which will point to user config provided by `appdirs`.\n save_to_disk : bool, optional\n Persist settings on disk. 
Default is True.\n\n Notes\n -----\n The settings manager will create a new user configuration folder which is\n provided by `appdirs` in a cross platform manner. On the first startup a\n new configuration file will be created using the default values defined by\n the `CORE_SETTINGS` models.\n\n If a configuration file is found in the specified location, it will be\n loaded by the `_load` method. On configuration load the following checks\n are performed:\n\n - If invalid sections are found, these will be removed from the file.\n - If invalid keys are found within a valid section, these will be removed\n from the file.\n - If invalid values are found within valid sections and valid keys, these\n will be replaced by the default value provided by `CORE_SETTINGS`\n models.\n \"\"\"\n\n _FILENAME = _FILENAME\n _APPNAME = _APPNAME\n _APPAUTHOR = _APPAUTHOR\n\n def __init__(\n self,\n config_path: Optional[Path] = None,\n save_to_disk: bool = True,\n ):\n self._config_path = (\n Path(user_config_dir(self._APPNAME, self._APPAUTHOR))\n if config_path is None\n else Path(config_path)\n )\n self._save_to_disk = save_to_disk\n self._settings: Dict[str, BaseNapariSettings] = {}\n self._defaults: Dict[str, BaseNapariSettings] = {}\n self._plugins: List[str] = []\n self._env_settings: Dict[str, Any] = {}\n\n if not self._config_path.is_dir():\n os.makedirs(self._config_path)\n\n self._load()\n\n def __getattr__(self, attr):\n if attr in self._settings:\n return self._settings[attr]\n\n def __dir__(self):\n \"\"\"Add setting keys to make tab completion works.\"\"\"\n return list(super().__dir__()) + list(self._settings)\n\n def __str__(self):\n return safe_dump(self._to_dict(safe=True))\n\n def _remove_default(self, settings_data):\n \"\"\"\n Attempt to convert self to dict and to remove any default values from the configuration\n \"\"\"\n for section, values in settings_data.items():\n if section not in self._defaults:\n continue\n\n default_values = self._defaults[section].dict()\n for k, v in list(values.items()):\n if default_values.get(k, None) == v:\n del values[k]\n\n return settings_data\n\n def _to_dict(self, safe: bool = False) -> dict:\n \"\"\"Convert the settings to a dictionary.\"\"\"\n data = {}\n for section, model in self._settings.items():\n if safe:\n # We roundtrip to keep string objects (like SchemaVersion)\n # yaml representable\n data[section] = json.loads(model.json())\n else:\n data[section] = model.dict()\n\n return data\n\n def _save(self):\n \"\"\"Save configuration to disk.\"\"\"\n if self._save_to_disk:\n path = self.path / self._FILENAME\n\n if self._env_settings:\n # If using environment variables do not save them in the\n # `settings.yaml` file. 
We just delete any keys loaded\n # as environment variables\n data = self._to_dict(safe=True)\n for section, env_data in self._env_settings.items():\n for k, v in env_data.items():\n del data[section][k]\n else:\n data = self._to_dict(safe=True)\n\n with open(path, \"w\") as fh:\n fh.write(safe_dump(self._remove_default(data)))\n\n def _load(self):\n \"\"\"Read configuration from disk.\"\"\"\n path = self.path / self._FILENAME\n\n if path.is_file():\n try:\n with open(path) as fh:\n data = safe_load(fh.read()) or {}\n except Exception as err:\n warnings.warn(\n trans._(\n \"The content of the napari settings file could not be read\\n\\nThe default settings will be used and the content of the file will be replaced the next time settings are changed.\\n\\nError:\\n{err}\",\n deferred=True,\n err=err,\n )\n )\n data = {}\n\n # Load data once and save it in the base class\n BaseNapariSettings._LOADED_DATA = data\n\n for setting in CORE_SETTINGS:\n section = setting.schema().get(\"section\", None)\n if section is None:\n raise ValueError(\n trans._(\n \"Settings model {setting!r} must provide a `section` in the `schemas_extra`\",\n deferred=True,\n setting=setting,\n )\n )\n\n _section_defaults = {}\n for option, option_data in setting.schema()[\"properties\"].items():\n _section_defaults[option] = option_data.get(\"default\", None)\n\n self._defaults[section] = setting(**_section_defaults)\n model = setting()\n model.events.connect(lambda x: self._save())\n self._settings[section] = model\n self._env_settings[section] = getattr(\n model.__config__, \"_env_settings\"\n )(model)\n\n self._save()\n\n @property\n def path(self):\n return self._config_path\n\n def reset(self):\n \"\"\"Reset settings to default values.\"\"\"\n for section in self._settings:\n for key, default_value in self._defaults[section].dict().items():\n setattr(self._settings[section], key, default_value)\n\n self._save()\n\n def schemas(self) -> dict:\n \"\"\"Return the json schema for each of the settings model.\"\"\"\n schemas = {}\n for section, settings in self._settings.items():\n schemas[section] = {\n \"json_schema\": settings.schema_json(),\n \"model\": settings,\n }\n\n return schemas\n\n def register_plugin(self, plugin):\n \"\"\"Register plugin settings with the settings manager.\n\n Parameters\n ----------\n plugin\n The napari plugin that may or may not provide settings.\n \"\"\"\n self._plugins.append(plugin)\n\n\nclass _SettingsProxy(_SettingsMixin):\n \"\"\"Backwards compatibility layer.\"\"\"\n\n def __getattribute__(self, name) -> Any:\n return getattr(get_settings(), name)\n\n\nSETTINGS: Union[SettingsManager, _SettingsProxy] = _SettingsProxy()\n\n\ndef get_settings(path: Optional[Union[Path, str]] = None) -> SettingsManager:\n \"\"\"\n Get settings for a given path.\n\n Parameters\n ----------\n path: Path, optional\n The path to read/write the settings from.\n\n Returns\n -------\n SettingsManager\n The settings manager.\n\n Notes\n -----\n The path can only be set once per session.\n \"\"\"\n global SETTINGS\n\n if isinstance(SETTINGS, _SettingsProxy):\n config_path = Path(path).resolve() if path else None\n SETTINGS = SettingsManager(config_path=config_path)\n elif path is not None:\n import inspect\n\n curframe = inspect.currentframe()\n calframe = inspect.getouterframes(curframe, 2)\n raise Exception(\n trans._(\n \"The path can only be set once per session. 
Settings called from {calframe[1][3]}\",\n deferred=True,\n calframe=calframe,\n )\n )\n\n return SETTINGS\n", "path": "napari/utils/settings/_manager.py"}]}
| 3,527 | 177 |
gh_patches_debug_20877
|
rasdani/github-patches
|
git_diff
|
googleapis__python-bigquery-449
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Integration test with pyarrow nightly builds?
I'm not sure how difficult it would be to add, but it might be useful to regularly test the library against pyarrow nightly build's to catch regressions before they make their way into releases.
</issue>
<code>
[start of noxfile.py]
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16
17 import pathlib
18 import os
19 import shutil
20
21 import nox
22
23
24 BLACK_VERSION = "black==19.10b0"
25 BLACK_PATHS = ("docs", "google", "samples", "tests", "noxfile.py", "setup.py")
26 CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
27
28 # 'docfx' is excluded since it only needs to run in 'docs-presubmit'
29 nox.options.sessions = [
30 "unit",
31 "system",
32 "snippets",
33 "cover",
34 "lint",
35 "lint_setup_py",
36 "blacken",
37 "docs",
38 ]
39
40
41 def default(session):
42 """Default unit test session.
43
44 This is intended to be run **without** an interpreter set, so
45 that the current ``python`` (on the ``PATH``) or the version of
46 Python corresponding to the ``nox`` binary the ``PATH`` can
47 run the tests.
48 """
49 constraints_path = str(
50 CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
51 )
52
53 # Install all test dependencies, then install local packages in-place.
54 session.install(
55 "mock",
56 "pytest",
57 "google-cloud-testutils",
58 "pytest-cov",
59 "freezegun",
60 "-c",
61 constraints_path,
62 )
63
64 session.install("-e", ".[all]", "-c", constraints_path)
65
66 session.install("ipython", "-c", constraints_path)
67
68 # Run py.test against the unit tests.
69 session.run(
70 "py.test",
71 "--quiet",
72 "--cov=google.cloud.bigquery",
73 "--cov=tests.unit",
74 "--cov-append",
75 "--cov-config=.coveragerc",
76 "--cov-report=",
77 "--cov-fail-under=0",
78 os.path.join("tests", "unit"),
79 *session.posargs,
80 )
81
82
83 @nox.session(python=["3.6", "3.7", "3.8"])
84 def unit(session):
85 """Run the unit test suite."""
86 default(session)
87
88
89 @nox.session(python=["3.8"])
90 def system(session):
91 """Run the system test suite."""
92
93 constraints_path = str(
94 CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
95 )
96
97 # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
98 if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
99 session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
100
101 # Sanity check: Only run system tests if the environment variable is set.
102 if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
103 session.skip("Credentials must be set via environment variable.")
104
105 # Use pre-release gRPC for system tests.
106 session.install("--pre", "grpcio", "-c", constraints_path)
107
108 # Install all test dependencies, then install local packages in place.
109 session.install(
110 "mock", "pytest", "psutil", "google-cloud-testutils", "-c", constraints_path
111 )
112 session.install("google-cloud-storage", "-c", constraints_path)
113
114 session.install("-e", ".[all]", "-c", constraints_path)
115 session.install("ipython", "-c", constraints_path)
116
117 # Run py.test against the system tests.
118 session.run(
119 "py.test", "--quiet", os.path.join("tests", "system.py"), *session.posargs
120 )
121
122
123 @nox.session(python=["3.8"])
124 def snippets(session):
125 """Run the snippets test suite."""
126
127 # Check the value of `RUN_SNIPPETS_TESTS` env var. It defaults to true.
128 if os.environ.get("RUN_SNIPPETS_TESTS", "true") == "false":
129 session.skip("RUN_SNIPPETS_TESTS is set to false, skipping")
130
131 # Sanity check: Only run snippets tests if the environment variable is set.
132 if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
133 session.skip("Credentials must be set via environment variable.")
134
135 constraints_path = str(
136 CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
137 )
138
139 # Install all test dependencies, then install local packages in place.
140 session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path)
141 session.install("google-cloud-storage", "-c", constraints_path)
142 session.install("grpcio", "-c", constraints_path)
143
144 session.install("-e", ".[all]", "-c", constraints_path)
145
146 # Run py.test against the snippets tests.
147 # Skip tests in samples/snippets, as those are run in a different session
148 # using the nox config from that directory.
149 session.run("py.test", os.path.join("docs", "snippets.py"), *session.posargs)
150 session.run(
151 "py.test",
152 "samples",
153 "--ignore=samples/snippets",
154 "--ignore=samples/geography",
155 *session.posargs,
156 )
157
158
159 @nox.session(python="3.8")
160 def cover(session):
161 """Run the final coverage report.
162
163 This outputs the coverage report aggregating coverage from the unit
164 test runs (not system test runs), and then erases coverage data.
165 """
166 session.install("coverage", "pytest-cov")
167 session.run("coverage", "report", "--show-missing", "--fail-under=100")
168 session.run("coverage", "erase")
169
170
171 @nox.session(python="3.8")
172 def lint(session):
173 """Run linters.
174
175 Returns a failure if the linters find linting errors or sufficiently
176 serious code quality issues.
177 """
178
179 session.install("flake8", BLACK_VERSION)
180 session.install("-e", ".")
181 session.run("flake8", os.path.join("google", "cloud", "bigquery"))
182 session.run("flake8", "tests")
183 session.run("flake8", os.path.join("docs", "samples"))
184 session.run("flake8", os.path.join("docs", "snippets.py"))
185 session.run("black", "--check", *BLACK_PATHS)
186
187
188 @nox.session(python="3.8")
189 def lint_setup_py(session):
190 """Verify that setup.py is valid (including RST check)."""
191
192 session.install("docutils", "Pygments")
193 session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
194
195
196 @nox.session(python="3.6")
197 def blacken(session):
198 """Run black.
199 Format code to uniform standard.
200
201 This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
202 That run uses an image that doesn't have 3.6 installed. Before updating this
203 check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
204 """
205 session.install(BLACK_VERSION)
206 session.run("black", *BLACK_PATHS)
207
208
209 @nox.session(python="3.8")
210 def docs(session):
211 """Build the docs."""
212
213 session.install("ipython", "recommonmark", "sphinx", "sphinx_rtd_theme")
214 session.install("google-cloud-storage")
215 session.install("-e", ".[all]")
216
217 shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
218 session.run(
219 "sphinx-build",
220 "-W", # warnings as errors
221 "-T", # show full traceback on exception
222 "-N", # no colors
223 "-b",
224 "html",
225 "-d",
226 os.path.join("docs", "_build", "doctrees", ""),
227 os.path.join("docs", ""),
228 os.path.join("docs", "_build", "html", ""),
229 )
230
231
232 @nox.session(python="3.8")
233 def docfx(session):
234 """Build the docfx yaml files for this library."""
235
236 session.install("-e", ".")
237 session.install("sphinx", "alabaster", "recommonmark", "sphinx-docfx-yaml")
238
239 shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
240 session.run(
241 "sphinx-build",
242 "-T", # show full traceback on exception
243 "-N", # no colors
244 "-D",
245 (
246 "extensions=sphinx.ext.autodoc,"
247 "sphinx.ext.autosummary,"
248 "docfx_yaml.extension,"
249 "sphinx.ext.intersphinx,"
250 "sphinx.ext.coverage,"
251 "sphinx.ext.napoleon,"
252 "sphinx.ext.todo,"
253 "sphinx.ext.viewcode,"
254 "recommonmark"
255 ),
256 "-b",
257 "html",
258 "-d",
259 os.path.join("docs", "_build", "doctrees", ""),
260 os.path.join("docs", ""),
261 os.path.join("docs", "_build", "html", ""),
262 )
263
[end of noxfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -168,6 +168,38 @@
session.run("coverage", "erase")
[email protected](python="3.8")
+def prerelease_deps(session):
+ """Run all tests with prerelease versions of dependencies installed.
+
+ https://github.com/googleapis/python-bigquery/issues/95
+ """
+ # PyArrow prerelease packages are published to an alternative PyPI host.
+ # https://arrow.apache.org/docs/python/install.html#installing-nightly-packages
+ session.install(
+ "--extra-index-url", "https://pypi.fury.io/arrow-nightlies/", "--pre", "pyarrow"
+ )
+ session.install("--pre", "grpcio", "pandas")
+ session.install(
+ "mock",
+ "pytest",
+ "google-cloud-testutils",
+ "pytest-cov",
+ "freezegun",
+ "IPython",
+ )
+ session.install("-e", ".[all]")
+
+ # Print out prerelease package versions.
+ session.run("python", "-c", "import grpc; print(grpc.__version__)")
+ session.run("python", "-c", "import pandas; print(pandas.__version__)")
+ session.run("python", "-c", "import pyarrow; print(pyarrow.__version__)")
+
+ # Run all tests, except a few samples tests which require extra dependencies.
+ session.run("py.test", "tests")
+ session.run("py.test", "samples/tests")
+
+
@nox.session(python="3.8")
def lint(session):
"""Run linters.
|
{"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -168,6 +168,38 @@\n session.run(\"coverage\", \"erase\")\n \n \[email protected](python=\"3.8\")\n+def prerelease_deps(session):\n+ \"\"\"Run all tests with prerelease versions of dependencies installed.\n+\n+ https://github.com/googleapis/python-bigquery/issues/95\n+ \"\"\"\n+ # PyArrow prerelease packages are published to an alternative PyPI host.\n+ # https://arrow.apache.org/docs/python/install.html#installing-nightly-packages\n+ session.install(\n+ \"--extra-index-url\", \"https://pypi.fury.io/arrow-nightlies/\", \"--pre\", \"pyarrow\"\n+ )\n+ session.install(\"--pre\", \"grpcio\", \"pandas\")\n+ session.install(\n+ \"mock\",\n+ \"pytest\",\n+ \"google-cloud-testutils\",\n+ \"pytest-cov\",\n+ \"freezegun\",\n+ \"IPython\",\n+ )\n+ session.install(\"-e\", \".[all]\")\n+\n+ # Print out prerelease package versions.\n+ session.run(\"python\", \"-c\", \"import grpc; print(grpc.__version__)\")\n+ session.run(\"python\", \"-c\", \"import pandas; print(pandas.__version__)\")\n+ session.run(\"python\", \"-c\", \"import pyarrow; print(pyarrow.__version__)\")\n+\n+ # Run all tests, except a few samples tests which require extra dependencies.\n+ session.run(\"py.test\", \"tests\")\n+ session.run(\"py.test\", \"samples/tests\")\n+\n+\n @nox.session(python=\"3.8\")\n def lint(session):\n \"\"\"Run linters.\n", "issue": "Integration test with pyarrow nightly builds?\nI'm not sure how difficult it would be to add, but it might be useful to regularly test the library against pyarrow nightly build's to catch regressions before they make their way into releases.\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport pathlib\nimport os\nimport shutil\n\nimport nox\n\n\nBLACK_VERSION = \"black==19.10b0\"\nBLACK_PATHS = (\"docs\", \"google\", \"samples\", \"tests\", \"noxfile.py\", \"setup.py\")\nCURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()\n\n# 'docfx' is excluded since it only needs to run in 'docs-presubmit'\nnox.options.sessions = [\n \"unit\",\n \"system\",\n \"snippets\",\n \"cover\",\n \"lint\",\n \"lint_setup_py\",\n \"blacken\",\n \"docs\",\n]\n\n\ndef default(session):\n \"\"\"Default unit test session.\n\n This is intended to be run **without** an interpreter set, so\n that the current ``python`` (on the ``PATH``) or the version of\n Python corresponding to the ``nox`` binary the ``PATH`` can\n run the tests.\n \"\"\"\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in-place.\n session.install(\n \"mock\",\n \"pytest\",\n \"google-cloud-testutils\",\n \"pytest-cov\",\n \"freezegun\",\n \"-c\",\n constraints_path,\n )\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run 
py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=google.cloud.bigquery\",\n \"--cov=tests.unit\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=0\",\n os.path.join(\"tests\", \"unit\"),\n *session.posargs,\n )\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\"])\ndef unit(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session)\n\n\[email protected](python=[\"3.8\"])\ndef system(session):\n \"\"\"Run the system test suite.\"\"\"\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.\n if os.environ.get(\"RUN_SYSTEM_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SYSTEM_TESTS is set to false, skipping\")\n\n # Sanity check: Only run system tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Use pre-release gRPC for system tests.\n session.install(\"--pre\", \"grpcio\", \"-c\", constraints_path)\n\n # Install all test dependencies, then install local packages in place.\n session.install(\n \"mock\", \"pytest\", \"psutil\", \"google-cloud-testutils\", \"-c\", constraints_path\n )\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test against the system tests.\n session.run(\n \"py.test\", \"--quiet\", os.path.join(\"tests\", \"system.py\"), *session.posargs\n )\n\n\[email protected](python=[\"3.8\"])\ndef snippets(session):\n \"\"\"Run the snippets test suite.\"\"\"\n\n # Check the value of `RUN_SNIPPETS_TESTS` env var. 
It defaults to true.\n if os.environ.get(\"RUN_SNIPPETS_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SNIPPETS_TESTS is set to false, skipping\")\n\n # Sanity check: Only run snippets tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"google-cloud-testutils\", \"-c\", constraints_path)\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n session.install(\"grpcio\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n\n # Run py.test against the snippets tests.\n # Skip tests in samples/snippets, as those are run in a different session\n # using the nox config from that directory.\n session.run(\"py.test\", os.path.join(\"docs\", \"snippets.py\"), *session.posargs)\n session.run(\n \"py.test\",\n \"samples\",\n \"--ignore=samples/snippets\",\n \"--ignore=samples/geography\",\n *session.posargs,\n )\n\n\[email protected](python=\"3.8\")\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.install(\"coverage\", \"pytest-cov\")\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n session.run(\"coverage\", \"erase\")\n\n\[email protected](python=\"3.8\")\ndef lint(session):\n \"\"\"Run linters.\n\n Returns a failure if the linters find linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n\n session.install(\"flake8\", BLACK_VERSION)\n session.install(\"-e\", \".\")\n session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n session.run(\"flake8\", \"tests\")\n session.run(\"flake8\", os.path.join(\"docs\", \"samples\"))\n session.run(\"flake8\", os.path.join(\"docs\", \"snippets.py\"))\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n\n\[email protected](python=\"3.8\")\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n\n session.install(\"docutils\", \"Pygments\")\n session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. 
Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=\"3.8\")\ndef docs(session):\n \"\"\"Build the docs.\"\"\"\n\n session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-W\", # warnings as errors\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n\n\[email protected](python=\"3.8\")\ndef docfx(session):\n \"\"\"Build the docfx yaml files for this library.\"\"\"\n\n session.install(\"-e\", \".\")\n session.install(\"sphinx\", \"alabaster\", \"recommonmark\", \"sphinx-docfx-yaml\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-D\",\n (\n \"extensions=sphinx.ext.autodoc,\"\n \"sphinx.ext.autosummary,\"\n \"docfx_yaml.extension,\"\n \"sphinx.ext.intersphinx,\"\n \"sphinx.ext.coverage,\"\n \"sphinx.ext.napoleon,\"\n \"sphinx.ext.todo,\"\n \"sphinx.ext.viewcode,\"\n \"recommonmark\"\n ),\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n", "path": "noxfile.py"}]}
| 3,315 | 387 |
gh_patches_debug_28282
|
rasdani/github-patches
|
git_diff
|
getpelican__pelican-1040
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow nice URLs on devserver
When you publish the website is quite easy to implement nice URLs. For example an nginx configuraiton like this would provide you with nice URLs for your website:
```
index index.php index.html index.htm index.shtml;
root /var/www/example.com/public_html;
try_files $uri $uri.html $uri/index.html index.html;
```
Now the problem is tha the devserver does not have the save behaviour, and so far I am now aware of any way to configure it to do this.
```
/output/index.html http://localhost:8000
/output/contact.html http://localhost:8000/contact
/output/sales/index.html http://localhost:8000/sales
/output/sales/index.html http://localhost:8000/sales/ (yes the same)
```
</issue>
<code>
[start of pelican/server.py]
1 from __future__ import print_function
2 import sys
3 try:
4 import SimpleHTTPServer as srvmod
5 except ImportError:
6 import http.server as srvmod # NOQA
7
8 try:
9 import SocketServer as socketserver
10 except ImportError:
11 import socketserver # NOQA
12
13 PORT = len(sys.argv) == 2 and int(sys.argv[1]) or 8000
14
15 Handler = srvmod.SimpleHTTPRequestHandler
16
17 try:
18 httpd = socketserver.TCPServer(("", PORT), Handler)
19 except OSError as e:
20 print("Could not listen on port", PORT)
21 sys.exit(getattr(e, 'exitcode', 1))
22
23
24 print("serving at port", PORT)
25 try:
26 httpd.serve_forever()
27 except KeyboardInterrupt as e:
28 print("shutting down server")
29 httpd.socket.close()
30
[end of pelican/server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pelican/server.py b/pelican/server.py
--- a/pelican/server.py
+++ b/pelican/server.py
@@ -1,5 +1,7 @@
from __future__ import print_function
+import os
import sys
+import logging
try:
import SimpleHTTPServer as srvmod
except ImportError:
@@ -11,19 +13,36 @@
import socketserver # NOQA
PORT = len(sys.argv) == 2 and int(sys.argv[1]) or 8000
+SUFFIXES = ['','.html','/index.html']
-Handler = srvmod.SimpleHTTPRequestHandler
+class ComplexHTTPRequestHandler(srvmod.SimpleHTTPRequestHandler):
+ def do_GET(self):
+ # we are trying to detect the file by having a fallback mechanism
+ r = None
+ for suffix in SUFFIXES:
+ if not hasattr(self,'original_path'):
+ self.original_path = self.path
+ self.path = self.original_path + suffix
+ path = self.translate_path(self.path)
+ if os.path.exists(path):
+ r = srvmod.SimpleHTTPRequestHandler.do_GET(self)
+ if r is not None:
+ break
+ logging.warning("Unable to find %s file." % self.path)
+ return r
+
+Handler = ComplexHTTPRequestHandler
try:
httpd = socketserver.TCPServer(("", PORT), Handler)
except OSError as e:
- print("Could not listen on port", PORT)
+ logging.error("Could not listen on port %s" % PORT)
sys.exit(getattr(e, 'exitcode', 1))
-print("serving at port", PORT)
+logging.info("serving at port %s" % PORT)
try:
httpd.serve_forever()
except KeyboardInterrupt as e:
- print("shutting down server")
- httpd.socket.close()
+ logging.info("shutting down server")
+ httpd.socket.close()
\ No newline at end of file
|
{"golden_diff": "diff --git a/pelican/server.py b/pelican/server.py\n--- a/pelican/server.py\n+++ b/pelican/server.py\n@@ -1,5 +1,7 @@\n from __future__ import print_function\n+import os\n import sys\n+import logging\n try:\n import SimpleHTTPServer as srvmod\n except ImportError:\n@@ -11,19 +13,36 @@\n import socketserver # NOQA\n \n PORT = len(sys.argv) == 2 and int(sys.argv[1]) or 8000\n+SUFFIXES = ['','.html','/index.html']\n \n-Handler = srvmod.SimpleHTTPRequestHandler\n+class ComplexHTTPRequestHandler(srvmod.SimpleHTTPRequestHandler):\n+ def do_GET(self):\n+ # we are trying to detect the file by having a fallback mechanism\n+ r = None\n+ for suffix in SUFFIXES:\n+ if not hasattr(self,'original_path'):\n+ self.original_path = self.path\n+ self.path = self.original_path + suffix\n+ path = self.translate_path(self.path)\n+ if os.path.exists(path):\n+ r = srvmod.SimpleHTTPRequestHandler.do_GET(self)\n+ if r is not None:\n+ break\n+ logging.warning(\"Unable to find %s file.\" % self.path)\n+ return r\n+\n+Handler = ComplexHTTPRequestHandler\n \n try:\n httpd = socketserver.TCPServer((\"\", PORT), Handler)\n except OSError as e:\n- print(\"Could not listen on port\", PORT)\n+ logging.error(\"Could not listen on port %s\" % PORT)\n sys.exit(getattr(e, 'exitcode', 1))\n \n \n-print(\"serving at port\", PORT)\n+logging.info(\"serving at port %s\" % PORT)\n try:\n httpd.serve_forever()\n except KeyboardInterrupt as e:\n- print(\"shutting down server\")\n- httpd.socket.close()\n+ logging.info(\"shutting down server\")\n+ httpd.socket.close()\n\\ No newline at end of file\n", "issue": "Allow nice URLs on devserver\nWhen you publish the website is quite easy to implement nice URLs. For example an nginx configuraiton like this would provide you with nice URLs for your website:\n\n```\nindex index.php index.html index.htm index.shtml;\nroot /var/www/example.com/public_html;\ntry_files $uri $uri.html $uri/index.html index.html;\n```\n\nNow the problem is tha the devserver does not have the save behaviour, and so far I am now aware of any way to configure it to do this.\n\n```\n/output/index.html http://localhost:8000\n/output/contact.html http://localhost:8000/contact\n/output/sales/index.html http://localhost:8000/sales\n/output/sales/index.html http://localhost:8000/sales/ (yes the same) \n```\n\n", "before_files": [{"content": "from __future__ import print_function\nimport sys\ntry:\n import SimpleHTTPServer as srvmod\nexcept ImportError:\n import http.server as srvmod # NOQA\n\ntry:\n import SocketServer as socketserver\nexcept ImportError:\n import socketserver # NOQA\n\nPORT = len(sys.argv) == 2 and int(sys.argv[1]) or 8000\n\nHandler = srvmod.SimpleHTTPRequestHandler\n\ntry:\n httpd = socketserver.TCPServer((\"\", PORT), Handler)\nexcept OSError as e:\n print(\"Could not listen on port\", PORT)\n sys.exit(getattr(e, 'exitcode', 1))\n\n\nprint(\"serving at port\", PORT)\ntry:\n httpd.serve_forever()\nexcept KeyboardInterrupt as e:\n print(\"shutting down server\")\n httpd.socket.close()\n", "path": "pelican/server.py"}]}
| 946 | 440 |
gh_patches_debug_25372
|
rasdani/github-patches
|
git_diff
|
facebookresearch__CompilerGym-739
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Logical Failure when combing TimeLimit Wrapper with IterateOverBenchmarks
## 🐛 Bug
If an environment is first wrapper with TimeLimit before IterateOverBenchmarks, it will not return "done" as True.
## To Reproduce
Steps to reproduce the behavior:
```
env = TimeLimit(env, step_limit)
env = CycleOverBenchmarks(env, benchmarks)
_, done, _, _ = env.reset()
while not done:
_, done, _, _ = env.step(0)
```
This will not finish. However, if the TimeLimit happens after the Cycle, it has normal behavior.
## Additional context
Assign it to me, I will fix it when I got time.
</issue>
<code>
[start of compiler_gym/wrappers/time_limit.py]
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5 from typing import Optional
6
7 from compiler_gym.envs import CompilerEnv
8 from compiler_gym.util.gym_type_hints import ActionType
9 from compiler_gym.wrappers.core import CompilerEnvWrapper
10
11
12 class TimeLimit(CompilerEnvWrapper):
13 """A step-limited wrapper that is compatible with CompilerGym.
14
15 Example usage:
16
17 >>> env = TimeLimit(env, max_episode_steps=3)
18 >>> env.reset()
19 >>> _, _, done, _ = env.step(0)
20 >>> _, _, done, _ = env.step(0)
21 >>> _, _, done, _ = env.step(0)
22 >>> done
23 True
24 """
25
26 def __init__(self, env: CompilerEnv, max_episode_steps: Optional[int] = None):
27 super().__init__(env=env)
28 if max_episode_steps is None and self.env.spec is not None:
29 max_episode_steps = env.spec.max_episode_steps
30 if self.env.spec is not None:
31 self.env.spec.max_episode_steps = max_episode_steps
32 self._max_episode_steps = max_episode_steps
33 self._elapsed_steps = None
34
35 def step(self, action: ActionType, **kwargs):
36 assert (
37 self._elapsed_steps is not None
38 ), "Cannot call env.step() before calling reset()"
39 observation, reward, done, info = self.env.step(action, **kwargs)
40 self._elapsed_steps += 1
41 if self._elapsed_steps >= self._max_episode_steps:
42 info["TimeLimit.truncated"] = not done
43 done = True
44 return observation, reward, done, info
45
46 def reset(self, **kwargs):
47 self._elapsed_steps = 0
48 return self.env.reset(**kwargs)
49
50 def fork(self) -> "TimeLimit":
51 """Fork the wrapped environment.
52
53 The time limit state of the forked environment is the same as the source
54 state.
55 """
56 fkd = type(self)(env=self.env.fork(), max_episode_steps=self._max_episode_steps)
57 fkd._elapsed_steps = self._elapsed_steps # pylint: disable=protected-access
58 return fkd
59
[end of compiler_gym/wrappers/time_limit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/compiler_gym/wrappers/time_limit.py b/compiler_gym/wrappers/time_limit.py
--- a/compiler_gym/wrappers/time_limit.py
+++ b/compiler_gym/wrappers/time_limit.py
@@ -2,7 +2,7 @@
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
-from typing import Optional
+from typing import Iterable, Optional
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.gym_type_hints import ActionType
@@ -32,12 +32,13 @@
self._max_episode_steps = max_episode_steps
self._elapsed_steps = None
- def step(self, action: ActionType, **kwargs):
+ def multistep(self, actions: Iterable[ActionType], **kwargs):
+ actions = list(actions)
assert (
self._elapsed_steps is not None
), "Cannot call env.step() before calling reset()"
- observation, reward, done, info = self.env.step(action, **kwargs)
- self._elapsed_steps += 1
+ observation, reward, done, info = self.env.multistep(actions, **kwargs)
+ self._elapsed_steps += len(actions)
if self._elapsed_steps >= self._max_episode_steps:
info["TimeLimit.truncated"] = not done
done = True
|
{"golden_diff": "diff --git a/compiler_gym/wrappers/time_limit.py b/compiler_gym/wrappers/time_limit.py\n--- a/compiler_gym/wrappers/time_limit.py\n+++ b/compiler_gym/wrappers/time_limit.py\n@@ -2,7 +2,7 @@\n #\n # This source code is licensed under the MIT license found in the\n # LICENSE file in the root directory of this source tree.\n-from typing import Optional\n+from typing import Iterable, Optional\n \n from compiler_gym.envs import CompilerEnv\n from compiler_gym.util.gym_type_hints import ActionType\n@@ -32,12 +32,13 @@\n self._max_episode_steps = max_episode_steps\n self._elapsed_steps = None\n \n- def step(self, action: ActionType, **kwargs):\n+ def multistep(self, actions: Iterable[ActionType], **kwargs):\n+ actions = list(actions)\n assert (\n self._elapsed_steps is not None\n ), \"Cannot call env.step() before calling reset()\"\n- observation, reward, done, info = self.env.step(action, **kwargs)\n- self._elapsed_steps += 1\n+ observation, reward, done, info = self.env.multistep(actions, **kwargs)\n+ self._elapsed_steps += len(actions)\n if self._elapsed_steps >= self._max_episode_steps:\n info[\"TimeLimit.truncated\"] = not done\n done = True\n", "issue": "Logical Failure when combing TimeLimit Wrapper with IterateOverBenchmarks\n## \ud83d\udc1b Bug\r\n\r\nIf an environment is first wrapper with TimeLimit before IterateOverBenchmarks, it will not return \"done\" as True. \r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n```\r\nenv = TimeLimit(env, step_limit) \r\nenv = CycleOverBenchmarks(env, benchmarks) \r\n_, done, _, _ = env.reset()\r\nwhile not done:\r\n _, done, _, _ = env.step(0) \r\n```\r\nThis will not finish. However, if the TimeLimit happens after the Cycle, it has normal behavior. \r\n\r\n\r\n## Additional context\r\n\r\nAssign it to me, I will fix it when I got time. \n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom typing import Optional\n\nfrom compiler_gym.envs import CompilerEnv\nfrom compiler_gym.util.gym_type_hints import ActionType\nfrom compiler_gym.wrappers.core import CompilerEnvWrapper\n\n\nclass TimeLimit(CompilerEnvWrapper):\n \"\"\"A step-limited wrapper that is compatible with CompilerGym.\n\n Example usage:\n\n >>> env = TimeLimit(env, max_episode_steps=3)\n >>> env.reset()\n >>> _, _, done, _ = env.step(0)\n >>> _, _, done, _ = env.step(0)\n >>> _, _, done, _ = env.step(0)\n >>> done\n True\n \"\"\"\n\n def __init__(self, env: CompilerEnv, max_episode_steps: Optional[int] = None):\n super().__init__(env=env)\n if max_episode_steps is None and self.env.spec is not None:\n max_episode_steps = env.spec.max_episode_steps\n if self.env.spec is not None:\n self.env.spec.max_episode_steps = max_episode_steps\n self._max_episode_steps = max_episode_steps\n self._elapsed_steps = None\n\n def step(self, action: ActionType, **kwargs):\n assert (\n self._elapsed_steps is not None\n ), \"Cannot call env.step() before calling reset()\"\n observation, reward, done, info = self.env.step(action, **kwargs)\n self._elapsed_steps += 1\n if self._elapsed_steps >= self._max_episode_steps:\n info[\"TimeLimit.truncated\"] = not done\n done = True\n return observation, reward, done, info\n\n def reset(self, **kwargs):\n self._elapsed_steps = 0\n return self.env.reset(**kwargs)\n\n def fork(self) -> \"TimeLimit\":\n \"\"\"Fork the wrapped environment.\n\n The time limit state of the forked environment is the same as the source\n state.\n \"\"\"\n fkd = type(self)(env=self.env.fork(), max_episode_steps=self._max_episode_steps)\n fkd._elapsed_steps = self._elapsed_steps # pylint: disable=protected-access\n return fkd\n", "path": "compiler_gym/wrappers/time_limit.py"}]}
| 1,300 | 304 |
gh_patches_debug_51262
|
rasdani/github-patches
|
git_diff
|
conda__conda-5426
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Launching navigator via prompt warnings appear
_From @RidaZubair on May 24, 2017 9:47_
**OS:** Windows
**Anaconda: 4.4.0**
**Actual:**
On launching navigator via the prompt, the following warning appears on the prompt

_Copied from original issue: ContinuumIO/navigator#1189_
</issue>
<code>
[start of conda/common/platform.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 from collections import OrderedDict
5 from genericpath import exists
6 from glob import glob
7 from logging import getLogger
8 import sys
9
10 from .compat import iteritems, on_win
11 from .._vendor.auxlib.decorators import memoize
12
13 log = getLogger(__name__)
14
15
16 def is_admin_on_windows(): # pragma: unix no cover
17 # http://stackoverflow.com/a/1026626/2127762
18 if not on_win: # pragma: no cover
19 return False
20 try:
21 from ctypes import windll
22 return windll.shell32.IsUserAnAdmin()() != 0
23 except ImportError as e:
24 log.debug('%r', e)
25 return 'unknown'
26 except Exception as e:
27 log.warn('%r', e)
28 return 'unknown'
29
30
31 @memoize
32 def linux_get_libc_version():
33 """
34 If on linux, returns (libc_family, version), otherwise (None, None)
35 """
36
37 if not sys.platform.startswith('linux'):
38 return None, None
39
40 from os import confstr, confstr_names, readlink
41
42 # Python 2.7 does not have either of these keys in confstr_names, so provide
43 # hard-coded defaults and assert if the key is in confstr_names but differs.
44 # These are defined by POSIX anyway so should never change.
45 confstr_names_fallback = OrderedDict([('CS_GNU_LIBC_VERSION', 2),
46 ('CS_GNU_LIBPTHREAD_VERSION', 3)])
47
48 val = None
49 for k, v in iteritems(confstr_names_fallback):
50 assert k not in confstr_names or confstr_names[k] == v, (
51 "confstr_names_fallback for %s is %s yet in confstr_names it is %s"
52 "" % (k, confstr_names_fallback[k], confstr_names[k])
53 )
54 try:
55 val = str(confstr(v))
56 except:
57 pass
58 else:
59 if val:
60 break
61
62 if not val:
63 # Weird, play it safe and assume glibc 2.5
64 family, version = 'glibc', '2.5'
65 log.warning("Failed to detect libc family and version, assuming %s/%s", family, version)
66 return family, version
67 family, version = val.split(' ')
68
69 # NPTL is just the name of the threading library, even though the
70 # version refers to that of uClibc. readlink() can help to try to
71 # figure out a better name instead.
72 if family == 'NPTL':
73 clibs = glob('/lib/libc.so*')
74 for clib in clibs:
75 clib = readlink(clib)
76 if exists(clib):
77 if clib.startswith('libuClibc'):
78 if version.startswith('0.'):
79 family = 'uClibc'
80 else:
81 family = 'uClibc-ng'
82 return family, version
83 # This could be some other C library; it is unlikely though.
84 family = 'uClibc'
85 log.warning("Failed to detect non-glibc family, assuming %s (%s)", family, version)
86 return family, version
87 return family, version
88
[end of conda/common/platform.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conda/common/platform.py b/conda/common/platform.py
--- a/conda/common/platform.py
+++ b/conda/common/platform.py
@@ -19,12 +19,12 @@
return False
try:
from ctypes import windll
- return windll.shell32.IsUserAnAdmin()() != 0
+ return windll.shell32.IsUserAnAdmin() != 0
except ImportError as e:
log.debug('%r', e)
return 'unknown'
except Exception as e:
- log.warn('%r', e)
+ log.info('%r', e)
return 'unknown'
|
{"golden_diff": "diff --git a/conda/common/platform.py b/conda/common/platform.py\n--- a/conda/common/platform.py\n+++ b/conda/common/platform.py\n@@ -19,12 +19,12 @@\n return False\n try:\n from ctypes import windll\n- return windll.shell32.IsUserAnAdmin()() != 0\n+ return windll.shell32.IsUserAnAdmin() != 0\n except ImportError as e:\n log.debug('%r', e)\n return 'unknown'\n except Exception as e:\n- log.warn('%r', e)\n+ log.info('%r', e)\n return 'unknown'\n", "issue": "Launching navigator via prompt warnings appear\n_From @RidaZubair on May 24, 2017 9:47_\n\n**OS:** Windows\r\n**Anaconda: 4.4.0**\r\n\r\n**Actual:**\r\nOn launching navigator via prompt following warning appears on prompt\r\n\r\n\r\n\n\n_Copied from original issue: ContinuumIO/navigator#1189_\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom genericpath import exists\nfrom glob import glob\nfrom logging import getLogger\nimport sys\n\nfrom .compat import iteritems, on_win\nfrom .._vendor.auxlib.decorators import memoize\n\nlog = getLogger(__name__)\n\n\ndef is_admin_on_windows(): # pragma: unix no cover\n # http://stackoverflow.com/a/1026626/2127762\n if not on_win: # pragma: no cover\n return False\n try:\n from ctypes import windll\n return windll.shell32.IsUserAnAdmin()() != 0\n except ImportError as e:\n log.debug('%r', e)\n return 'unknown'\n except Exception as e:\n log.warn('%r', e)\n return 'unknown'\n\n\n@memoize\ndef linux_get_libc_version():\n \"\"\"\n If on linux, returns (libc_family, version), otherwise (None, None)\n \"\"\"\n\n if not sys.platform.startswith('linux'):\n return None, None\n\n from os import confstr, confstr_names, readlink\n\n # Python 2.7 does not have either of these keys in confstr_names, so provide\n # hard-coded defaults and assert if the key is in confstr_names but differs.\n # These are defined by POSIX anyway so should never change.\n confstr_names_fallback = OrderedDict([('CS_GNU_LIBC_VERSION', 2),\n ('CS_GNU_LIBPTHREAD_VERSION', 3)])\n\n val = None\n for k, v in iteritems(confstr_names_fallback):\n assert k not in confstr_names or confstr_names[k] == v, (\n \"confstr_names_fallback for %s is %s yet in confstr_names it is %s\"\n \"\" % (k, confstr_names_fallback[k], confstr_names[k])\n )\n try:\n val = str(confstr(v))\n except:\n pass\n else:\n if val:\n break\n\n if not val:\n # Weird, play it safe and assume glibc 2.5\n family, version = 'glibc', '2.5'\n log.warning(\"Failed to detect libc family and version, assuming %s/%s\", family, version)\n return family, version\n family, version = val.split(' ')\n\n # NPTL is just the name of the threading library, even though the\n # version refers to that of uClibc. readlink() can help to try to\n # figure out a better name instead.\n if family == 'NPTL':\n clibs = glob('/lib/libc.so*')\n for clib in clibs:\n clib = readlink(clib)\n if exists(clib):\n if clib.startswith('libuClibc'):\n if version.startswith('0.'):\n family = 'uClibc'\n else:\n family = 'uClibc-ng'\n return family, version\n # This could be some other C library; it is unlikely though.\n family = 'uClibc'\n log.warning(\"Failed to detect non-glibc family, assuming %s (%s)\", family, version)\n return family, version\n return family, version\n", "path": "conda/common/platform.py"}]}
| 1,572 | 143 |
gh_patches_debug_24678
|
rasdani/github-patches
|
git_diff
|
cloudtools__troposphere-1693
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
implement AWS::DMS changes from May 14, 2020 update
</issue>
<code>
[start of troposphere/dms.py]
1 # Copyright (c) 2012-2019, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSObject, AWSProperty, Tags
7 from .validators import boolean, integer, network_port, positive_integer
8
9
10 CDC = "cdc"
11 FULL_LOAD = "full-load"
12 FULL_LOAD_AND_CDC = "full-load-and-cdc"
13
14
15 class Certificate(AWSObject):
16 resource_type = "AWS::DMS::Certificate"
17
18 props = {
19 'CertificateIdentifier': (basestring, False),
20 'CertificatePem': (basestring, False),
21 'CertificateWallet': (basestring, False),
22 }
23
24
25 class DynamoDbSettings(AWSProperty):
26 props = {
27 'ServiceAccessRoleArn': (basestring, False),
28 }
29
30
31 class ElasticsearchSettings(AWSProperty):
32 props = {
33 'EndpointUri': (basestring, False),
34 'ErrorRetryDuration': (integer, False),
35 'FullLoadErrorPercentage': (integer, False),
36 'ServiceAccessRoleArn': (basestring, False),
37 }
38
39
40 class KinesisSettings(AWSProperty):
41 props = {
42 'MessageFormat': (basestring, False),
43 'ServiceAccessRoleArn': (basestring, False),
44 'StreamArn': (basestring, False),
45 }
46
47
48 class MongoDbSettings(AWSProperty):
49 props = {
50 'AuthMechanism': (basestring, False),
51 'AuthSource': (basestring, False),
52 'AuthType': (basestring, False),
53 'DatabaseName': (basestring, False),
54 'DocsToInvestigate': (basestring, False),
55 'ExtractDocId': (basestring, False),
56 'NestingLevel': (basestring, False),
57 'Password': (basestring, False),
58 'Port': (network_port, False),
59 'ServerName': (basestring, False),
60 'Username': (basestring, False),
61 }
62
63
64 class S3Settings(AWSProperty):
65 props = {
66 'BucketFolder': (basestring, False),
67 'BucketName': (basestring, False),
68 'CompressionType': (basestring, False),
69 'CsvDelimiter': (basestring, False),
70 'CsvRowDelimiter': (basestring, False),
71 'ExternalTableDefinition': (basestring, False),
72 'ServiceAccessRoleArn': (basestring, False),
73 }
74
75
76 class KafkaSettings(AWSProperty):
77 props = {
78 'Broker': (basestring, False),
79 'Topic': (basestring, False),
80 }
81
82
83 class Endpoint(AWSObject):
84 resource_type = "AWS::DMS::Endpoint"
85
86 props = {
87 'CertificateArn': (basestring, False),
88 'DatabaseName': (basestring, False),
89 'DynamoDbSettings': (DynamoDbSettings, False),
90 'ElasticsearchSettings': (ElasticsearchSettings, False),
91 'EndpointIdentifier': (basestring, False),
92 'EndpointType': (basestring, True),
93 'EngineName': (basestring, True),
94 'ExtraConnectionAttributes': (basestring, False),
95 'KafkaSettings': (KafkaSettings, False),
96 'KinesisSettings': (KinesisSettings, False),
97 'KmsKeyId': (basestring, False),
98 'MongoDbSettings': (MongoDbSettings, False),
99 'Password': (basestring, False),
100 'Port': (network_port, False),
101 'S3Settings': (S3Settings, False),
102 'ServerName': (basestring, False),
103 'SslMode': (basestring, False),
104 'Tags': (Tags, False),
105 'Username': (basestring, False),
106 }
107
108
109 class EventSubscription(AWSObject):
110 resource_type = "AWS::DMS::EventSubscription"
111
112 props = {
113 'Enabled': (boolean, False),
114 'EventCategories': ([basestring], False),
115 'SnsTopicArn': (basestring, True),
116 'SourceIds': ([basestring], False),
117 'SourceType': (basestring, False),
118 'SubscriptionName': (basestring, False),
119 'Tags': (Tags, False),
120 }
121
122
123 class ReplicationInstance(AWSObject):
124 resource_type = "AWS::DMS::ReplicationInstance"
125
126 props = {
127 'AllocatedStorage': (integer, False),
128 'AllowMajorVersionUpgrade': (boolean, False),
129 'AutoMinorVersionUpgrade': (boolean, False),
130 'AvailabilityZone': (basestring, False),
131 'EngineVersion': (basestring, False),
132 'KmsKeyId': (basestring, False),
133 'MultiAZ': (boolean, False),
134 'PreferredMaintenanceWindow': (basestring, False),
135 'PubliclyAccessible': (boolean, False),
136 'ReplicationInstanceClass': (basestring, True),
137 'ReplicationInstanceIdentifier': (basestring, False),
138 'ReplicationSubnetGroupIdentifier': (basestring, False),
139 'Tags': (Tags, False),
140 'VpcSecurityGroupIds': ([basestring], False),
141 }
142
143
144 class ReplicationSubnetGroup(AWSObject):
145 resource_type = "AWS::DMS::ReplicationSubnetGroup"
146
147 props = {
148 'ReplicationSubnetGroupDescription': (basestring, True),
149 'ReplicationSubnetGroupIdentifier': (basestring, False),
150 'SubnetIds': ([basestring], True),
151 'Tags': (Tags, False),
152 }
153
154
155 class ReplicationTask(AWSObject):
156 resource_type = "AWS::DMS::ReplicationTask"
157
158 props = {
159 'CdcStartPosition': (basestring, False),
160 'CdcStartTime': (positive_integer, False),
161 'CdcStopPosition': (basestring, False),
162 'MigrationType': (basestring, True),
163 'ReplicationInstanceArn': (basestring, True),
164 'ReplicationTaskIdentifier': (basestring, False),
165 'ReplicationTaskSettings': (basestring, False),
166 'SourceEndpointArn': (basestring, True),
167 'TableMappings': (basestring, True),
168 'Tags': (Tags, False),
169 'TargetEndpointArn': (basestring, True),
170 }
171
[end of troposphere/dms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/troposphere/dms.py b/troposphere/dms.py
--- a/troposphere/dms.py
+++ b/troposphere/dms.py
@@ -80,6 +80,18 @@
}
+class NeptuneSettings(AWSProperty):
+ props = {
+ 'ErrorRetryDuration': (integer, False),
+ 'IamAuthEnabled': (boolean, False),
+ 'MaxFileSize': (integer, False),
+ 'MaxRetryCount': (integer, False),
+ 'S3BucketFolder': (basestring, False),
+ 'S3BucketName': (basestring, False),
+ 'ServiceAccessRoleArn': (basestring, False),
+ }
+
+
class Endpoint(AWSObject):
resource_type = "AWS::DMS::Endpoint"
@@ -96,6 +108,7 @@
'KinesisSettings': (KinesisSettings, False),
'KmsKeyId': (basestring, False),
'MongoDbSettings': (MongoDbSettings, False),
+ 'NeptuneSettings': (NeptuneSettings, False),
'Password': (basestring, False),
'Port': (network_port, False),
'S3Settings': (S3Settings, False),
@@ -167,4 +180,5 @@
'TableMappings': (basestring, True),
'Tags': (Tags, False),
'TargetEndpointArn': (basestring, True),
+ 'TaskData': (basestring, True),
}
|
{"golden_diff": "diff --git a/troposphere/dms.py b/troposphere/dms.py\n--- a/troposphere/dms.py\n+++ b/troposphere/dms.py\n@@ -80,6 +80,18 @@\n }\n \n \n+class NeptuneSettings(AWSProperty):\n+ props = {\n+ 'ErrorRetryDuration': (integer, False),\n+ 'IamAuthEnabled': (boolean, False),\n+ 'MaxFileSize': (integer, False),\n+ 'MaxRetryCount': (integer, False),\n+ 'S3BucketFolder': (basestring, False),\n+ 'S3BucketName': (basestring, False),\n+ 'ServiceAccessRoleArn': (basestring, False),\n+ }\n+\n+\n class Endpoint(AWSObject):\n resource_type = \"AWS::DMS::Endpoint\"\n \n@@ -96,6 +108,7 @@\n 'KinesisSettings': (KinesisSettings, False),\n 'KmsKeyId': (basestring, False),\n 'MongoDbSettings': (MongoDbSettings, False),\n+ 'NeptuneSettings': (NeptuneSettings, False),\n 'Password': (basestring, False),\n 'Port': (network_port, False),\n 'S3Settings': (S3Settings, False),\n@@ -167,4 +180,5 @@\n 'TableMappings': (basestring, True),\n 'Tags': (Tags, False),\n 'TargetEndpointArn': (basestring, True),\n+ 'TaskData': (basestring, True),\n }\n", "issue": "implement AWS::DMS changes from May 14, 2020 update\n\n", "before_files": [{"content": "# Copyright (c) 2012-2019, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty, Tags\nfrom .validators import boolean, integer, network_port, positive_integer\n\n\nCDC = \"cdc\"\nFULL_LOAD = \"full-load\"\nFULL_LOAD_AND_CDC = \"full-load-and-cdc\"\n\n\nclass Certificate(AWSObject):\n resource_type = \"AWS::DMS::Certificate\"\n\n props = {\n 'CertificateIdentifier': (basestring, False),\n 'CertificatePem': (basestring, False),\n 'CertificateWallet': (basestring, False),\n }\n\n\nclass DynamoDbSettings(AWSProperty):\n props = {\n 'ServiceAccessRoleArn': (basestring, False),\n }\n\n\nclass ElasticsearchSettings(AWSProperty):\n props = {\n 'EndpointUri': (basestring, False),\n 'ErrorRetryDuration': (integer, False),\n 'FullLoadErrorPercentage': (integer, False),\n 'ServiceAccessRoleArn': (basestring, False),\n }\n\n\nclass KinesisSettings(AWSProperty):\n props = {\n 'MessageFormat': (basestring, False),\n 'ServiceAccessRoleArn': (basestring, False),\n 'StreamArn': (basestring, False),\n }\n\n\nclass MongoDbSettings(AWSProperty):\n props = {\n 'AuthMechanism': (basestring, False),\n 'AuthSource': (basestring, False),\n 'AuthType': (basestring, False),\n 'DatabaseName': (basestring, False),\n 'DocsToInvestigate': (basestring, False),\n 'ExtractDocId': (basestring, False),\n 'NestingLevel': (basestring, False),\n 'Password': (basestring, False),\n 'Port': (network_port, False),\n 'ServerName': (basestring, False),\n 'Username': (basestring, False),\n }\n\n\nclass S3Settings(AWSProperty):\n props = {\n 'BucketFolder': (basestring, False),\n 'BucketName': (basestring, False),\n 'CompressionType': (basestring, False),\n 'CsvDelimiter': (basestring, False),\n 'CsvRowDelimiter': (basestring, False),\n 'ExternalTableDefinition': (basestring, False),\n 'ServiceAccessRoleArn': (basestring, False),\n }\n\n\nclass KafkaSettings(AWSProperty):\n props = {\n 'Broker': (basestring, False),\n 'Topic': (basestring, False),\n }\n\n\nclass Endpoint(AWSObject):\n resource_type = \"AWS::DMS::Endpoint\"\n\n props = {\n 'CertificateArn': (basestring, False),\n 'DatabaseName': (basestring, False),\n 'DynamoDbSettings': (DynamoDbSettings, False),\n 'ElasticsearchSettings': (ElasticsearchSettings, False),\n 'EndpointIdentifier': (basestring, False),\n 'EndpointType': (basestring, True),\n 'EngineName': (basestring, 
True),\n 'ExtraConnectionAttributes': (basestring, False),\n 'KafkaSettings': (KafkaSettings, False),\n 'KinesisSettings': (KinesisSettings, False),\n 'KmsKeyId': (basestring, False),\n 'MongoDbSettings': (MongoDbSettings, False),\n 'Password': (basestring, False),\n 'Port': (network_port, False),\n 'S3Settings': (S3Settings, False),\n 'ServerName': (basestring, False),\n 'SslMode': (basestring, False),\n 'Tags': (Tags, False),\n 'Username': (basestring, False),\n }\n\n\nclass EventSubscription(AWSObject):\n resource_type = \"AWS::DMS::EventSubscription\"\n\n props = {\n 'Enabled': (boolean, False),\n 'EventCategories': ([basestring], False),\n 'SnsTopicArn': (basestring, True),\n 'SourceIds': ([basestring], False),\n 'SourceType': (basestring, False),\n 'SubscriptionName': (basestring, False),\n 'Tags': (Tags, False),\n }\n\n\nclass ReplicationInstance(AWSObject):\n resource_type = \"AWS::DMS::ReplicationInstance\"\n\n props = {\n 'AllocatedStorage': (integer, False),\n 'AllowMajorVersionUpgrade': (boolean, False),\n 'AutoMinorVersionUpgrade': (boolean, False),\n 'AvailabilityZone': (basestring, False),\n 'EngineVersion': (basestring, False),\n 'KmsKeyId': (basestring, False),\n 'MultiAZ': (boolean, False),\n 'PreferredMaintenanceWindow': (basestring, False),\n 'PubliclyAccessible': (boolean, False),\n 'ReplicationInstanceClass': (basestring, True),\n 'ReplicationInstanceIdentifier': (basestring, False),\n 'ReplicationSubnetGroupIdentifier': (basestring, False),\n 'Tags': (Tags, False),\n 'VpcSecurityGroupIds': ([basestring], False),\n }\n\n\nclass ReplicationSubnetGroup(AWSObject):\n resource_type = \"AWS::DMS::ReplicationSubnetGroup\"\n\n props = {\n 'ReplicationSubnetGroupDescription': (basestring, True),\n 'ReplicationSubnetGroupIdentifier': (basestring, False),\n 'SubnetIds': ([basestring], True),\n 'Tags': (Tags, False),\n }\n\n\nclass ReplicationTask(AWSObject):\n resource_type = \"AWS::DMS::ReplicationTask\"\n\n props = {\n 'CdcStartPosition': (basestring, False),\n 'CdcStartTime': (positive_integer, False),\n 'CdcStopPosition': (basestring, False),\n 'MigrationType': (basestring, True),\n 'ReplicationInstanceArn': (basestring, True),\n 'ReplicationTaskIdentifier': (basestring, False),\n 'ReplicationTaskSettings': (basestring, False),\n 'SourceEndpointArn': (basestring, True),\n 'TableMappings': (basestring, True),\n 'Tags': (Tags, False),\n 'TargetEndpointArn': (basestring, True),\n }\n", "path": "troposphere/dms.py"}]}
| 2,322 | 337 |
gh_patches_debug_13580
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-9015
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Import wildcard mentions from Slack into zulip
When a user does a wildcard mention (i.e. `@channel`, `@here`, etc.), we should translate those to a zulip wildcard mention. I'd probably map them all to `@all` for now, but we should write the code in a way where changing the mapping is easy
Import wildcard mentions from Slack into zulip
When a user does a wildcard mention (i.e. `@channel`, `@here`, etc.), we should translate those to a zulip wildcard mention. I'd probably map them all to `@all` for now, but we should write the code in a way where changing the mapping is easy
</issue>
<code>
[start of zerver/lib/slack_message_conversion.py]
1 import re
2 from typing import Any, Dict, Tuple, List
3
4 # stubs
5 ZerverFieldsT = Dict[str, Any]
6 AddedUsersT = Dict[str, int]
7
8 # Slack link can be in the format <http://www.foo.com|www.foo.com> and <http://foo.com/>
9 LINK_REGEX = r"""
10 (<) # match '>'
11 (http:\/\/www\.|https:\/\/www\.|http:\/\/|https:\/\/|ftp:\/\/)? # protocol and www
12 ([a-z0-9]+([\-\.]{1}[a-z0-9]+)*)(\.) # domain name
13 ([a-z]{2,63}(:[0-9]{1,5})?) # domain
14 (\/[^>]*)? # path
15 (\|)?(?:\|([^>]+))? # char after pipe (for slack links)
16 (>)
17 """
18
19 SLACK_MAILTO_REGEX = r"""
20 <((mailto:)? # match `<mailto:`
21 ([\w\.-]+@[\w\.-]+(\.[\w]+)+)) # match email
22 (\|)? # match pipe
23 ([\w\.-]+@[\w\.-]+(\.[\w]+)+)?> # match email
24 """
25
26 SLACK_USERMENTION_REGEX = r"""
27 (<@) # Start with '<@'
28 ([a-zA-Z0-9]+) # Here we have the Slack id
29 (\|)? # We not always have a Vertical line in mention
30 ([a-zA-Z0-9]+)? # If Vertical line is present, this is short name
31 (>) # ends with '>'
32 """
33 # Slack doesn't have mid-word message-formatting like Zulip.
34 # Hence, ~stri~ke doesn't format the word in slack, but ~~stri~~ke
35 # formats the word in Zulip
36 SLACK_STRIKETHROUGH_REGEX = r"""
37 (^|[ -(]|[+-/]|\*|\_|[:-?]|\{|\[|\||\^) # Start after specified characters
38 (\~) # followed by an asterisk
39 ([ -)+-}—]*)([ -}]+) # any character except asterisk
40 (\~) # followed by an asterisk
41 ($|[ -']|[+-/]|[:-?]|\*|\_|\}|\)|\]|\||\^) # ends with specified characters
42 """
43 SLACK_ITALIC_REGEX = r"""
44 (^|[ -(]|[+-/]|[:-?]|\{|\[|\||\^|~)
45 (\_)
46 ([ -^`~—]*)([ -^`-~]+) # any character
47 (\_)
48 ($|[ -']|[+-/]|[:-?]|\}|\)|\]|\||\^|~)
49 """
50 SLACK_BOLD_REGEX = r"""
51 (^|[ -(]|[+-/]|[:-?]|\{|\[|\||\^|~)
52 (\*)
53 ([ -)+-~—]*)([ -)+-~]+) # any character
54 (\*)
55 ($|[ -']|[+-/]|[:-?]|\}|\)|\]|\||\^|~)
56 """
57
58 def get_user_full_name(user: ZerverFieldsT) -> str:
59 if user['deleted'] is False:
60 if user['real_name'] == '':
61 return user['name']
62 else:
63 return user['real_name']
64 else:
65 return user['name']
66
67 # Markdown mapping
68 def convert_to_zulip_markdown(text: str, users: List[ZerverFieldsT],
69 added_users: AddedUsersT) -> Tuple[str, List[int], bool]:
70 mentioned_users_id = []
71 text = convert_markdown_syntax(text, SLACK_BOLD_REGEX, "**")
72 text = convert_markdown_syntax(text, SLACK_STRIKETHROUGH_REGEX, "~~")
73 text = convert_markdown_syntax(text, SLACK_ITALIC_REGEX, "*")
74
75 # Map Slack's mention all: '<!everyone>' to '@**all** '
76 # No regex for this as it can be present anywhere in the sentence
77 text = text.replace('<!everyone>', '@**all**')
78
79 tokens = text.split(' ')
80 for iterator in range(len(tokens)):
81
82 # Check user mentions and change mention format from
83 # '<@slack_id|short_name>' to '@**full_name**'
84 if (re.findall(SLACK_USERMENTION_REGEX, tokens[iterator], re.VERBOSE)):
85 tokens[iterator], user_id = get_user_mentions(tokens[iterator],
86 users, added_users)
87 if user_id is not None:
88 mentioned_users_id.append(user_id)
89
90 text = ' '.join(tokens)
91
92 # Check and convert link format
93 text, has_link = convert_link_format(text)
94 # convert `<mailto:[email protected]>` to `mailto:[email protected]`
95 text, has_mailto_link = convert_mailto_format(text)
96
97 if has_link is True or has_mailto_link is True:
98 message_has_link = True
99 else:
100 message_has_link = False
101
102 return text, mentioned_users_id, message_has_link
103
104 def get_user_mentions(token: str, users: List[ZerverFieldsT],
105 added_users: AddedUsersT) -> Tuple[str, int]:
106 slack_usermention_match = re.search(SLACK_USERMENTION_REGEX, token, re.VERBOSE)
107 short_name = slack_usermention_match.group(4)
108 slack_id = slack_usermention_match.group(2)
109 for user in users:
110 if (user['id'] == slack_id and user['name'] == short_name and short_name) or \
111 (user['id'] == slack_id and short_name is None):
112 full_name = get_user_full_name(user)
113 user_id = added_users[slack_id]
114 mention = "@**" + full_name + "**"
115 token = re.sub(SLACK_USERMENTION_REGEX, mention, token, flags=re.VERBOSE)
116 return token, user_id
117 return token, None
118
119 # Map italic, bold and strikethrough markdown
120 def convert_markdown_syntax(text: str, regex: str, zulip_keyword: str) -> str:
121 """
122 Returns:
123 1. For strikethrough formatting: This maps Slack's '~strike~' to Zulip's '~~strike~~'
124 2. For bold formatting: This maps Slack's '*bold*' to Zulip's '**bold**'
125 3. For italic formatting: This maps Slack's '_italic_' to Zulip's '*italic*'
126 """
127 for match in re.finditer(regex, text, re.VERBOSE):
128 converted_token = (match.group(1) + zulip_keyword + match.group(3)
129 + match.group(4) + zulip_keyword + match.group(6))
130 text = text.replace(match.group(0), converted_token)
131 return text
132
133 def convert_link_format(text: str) -> Tuple[str, bool]:
134 """
135 1. Converts '<https://foo.com>' to 'https://foo.com'
136 2. Converts '<https://foo.com|foo>' to 'https://foo.com|foo'
137 """
138 has_link = False
139 for match in re.finditer(LINK_REGEX, text, re.VERBOSE):
140 converted_text = match.group(0).replace('>', '').replace('<', '')
141 has_link = True
142 text = text.replace(match.group(0), converted_text)
143 return text, has_link
144
145 def convert_mailto_format(text: str) -> Tuple[str, bool]:
146 """
147 1. Converts '<mailto:[email protected]>' to 'mailto:[email protected]'
148 2. Converts '<mailto:[email protected]|[email protected]>' to 'mailto:[email protected]'
149 """
150 has_link = False
151 for match in re.finditer(SLACK_MAILTO_REGEX, text, re.VERBOSE):
152 has_link = True
153 text = text.replace(match.group(0), match.group(1))
154 return text, has_link
155
[end of zerver/lib/slack_message_conversion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/zerver/lib/slack_message_conversion.py b/zerver/lib/slack_message_conversion.py
--- a/zerver/lib/slack_message_conversion.py
+++ b/zerver/lib/slack_message_conversion.py
@@ -73,8 +73,12 @@
text = convert_markdown_syntax(text, SLACK_ITALIC_REGEX, "*")
# Map Slack's mention all: '<!everyone>' to '@**all** '
+ # Map Slack's mention all: '<!channel>' to '@**all** '
+ # Map Slack's mention all: '<!here>' to '@**all** '
# No regex for this as it can be present anywhere in the sentence
text = text.replace('<!everyone>', '@**all**')
+ text = text.replace('<!channel>', '@**all**')
+ text = text.replace('<!here>', '@**all**')
tokens = text.split(' ')
for iterator in range(len(tokens)):
|
{"golden_diff": "diff --git a/zerver/lib/slack_message_conversion.py b/zerver/lib/slack_message_conversion.py\n--- a/zerver/lib/slack_message_conversion.py\n+++ b/zerver/lib/slack_message_conversion.py\n@@ -73,8 +73,12 @@\n text = convert_markdown_syntax(text, SLACK_ITALIC_REGEX, \"*\")\n \n # Map Slack's mention all: '<!everyone>' to '@**all** '\n+ # Map Slack's mention all: '<!channel>' to '@**all** '\n+ # Map Slack's mention all: '<!here>' to '@**all** '\n # No regex for this as it can be present anywhere in the sentence\n text = text.replace('<!everyone>', '@**all**')\n+ text = text.replace('<!channel>', '@**all**')\n+ text = text.replace('<!here>', '@**all**')\n \n tokens = text.split(' ')\n for iterator in range(len(tokens)):\n", "issue": "Import wildcard mentions from Slack into zulip\nWhen a user does a wildcard mention (i.e. `@channel`, `@here`, etc.), we should translate those to a zulip wildcard mention. I'd probably map them all to `@all` for now, but we should write the code in a way where changing the mapping is easy\nImport wildcard mentions from Slack into zulip\nWhen a user does a wildcard mention (i.e. `@channel`, `@here`, etc.), we should translate those to a zulip wildcard mention. I'd probably map them all to `@all` for now, but we should write the code in a way where changing the mapping is easy\n", "before_files": [{"content": "import re\nfrom typing import Any, Dict, Tuple, List\n\n# stubs\nZerverFieldsT = Dict[str, Any]\nAddedUsersT = Dict[str, int]\n\n# Slack link can be in the format <http://www.foo.com|www.foo.com> and <http://foo.com/>\nLINK_REGEX = r\"\"\"\n (<) # match '>'\n (http:\\/\\/www\\.|https:\\/\\/www\\.|http:\\/\\/|https:\\/\\/|ftp:\\/\\/)? # protocol and www\n ([a-z0-9]+([\\-\\.]{1}[a-z0-9]+)*)(\\.) # domain name\n ([a-z]{2,63}(:[0-9]{1,5})?) # domain\n (\\/[^>]*)? # path\n (\\|)?(?:\\|([^>]+))? # char after pipe (for slack links)\n (>)\n \"\"\"\n\nSLACK_MAILTO_REGEX = r\"\"\"\n <((mailto:)? # match `<mailto:`\n ([\\w\\.-]+@[\\w\\.-]+(\\.[\\w]+)+)) # match email\n (\\|)? # match pipe\n ([\\w\\.-]+@[\\w\\.-]+(\\.[\\w]+)+)?> # match email\n \"\"\"\n\nSLACK_USERMENTION_REGEX = r\"\"\"\n (<@) # Start with '<@'\n ([a-zA-Z0-9]+) # Here we have the Slack id\n (\\|)? # We not always have a Vertical line in mention\n ([a-zA-Z0-9]+)? 
# If Vertical line is present, this is short name\n (>) # ends with '>'\n \"\"\"\n# Slack doesn't have mid-word message-formatting like Zulip.\n# Hence, ~stri~ke doesn't format the word in slack, but ~~stri~~ke\n# formats the word in Zulip\nSLACK_STRIKETHROUGH_REGEX = r\"\"\"\n (^|[ -(]|[+-/]|\\*|\\_|[:-?]|\\{|\\[|\\||\\^) # Start after specified characters\n (\\~) # followed by an asterisk\n ([ -)+-}\u2014]*)([ -}]+) # any character except asterisk\n (\\~) # followed by an asterisk\n ($|[ -']|[+-/]|[:-?]|\\*|\\_|\\}|\\)|\\]|\\||\\^) # ends with specified characters\n \"\"\"\nSLACK_ITALIC_REGEX = r\"\"\"\n (^|[ -(]|[+-/]|[:-?]|\\{|\\[|\\||\\^|~)\n (\\_)\n ([ -^`~\u2014]*)([ -^`-~]+) # any character\n (\\_)\n ($|[ -']|[+-/]|[:-?]|\\}|\\)|\\]|\\||\\^|~)\n \"\"\"\nSLACK_BOLD_REGEX = r\"\"\"\n (^|[ -(]|[+-/]|[:-?]|\\{|\\[|\\||\\^|~)\n (\\*)\n ([ -)+-~\u2014]*)([ -)+-~]+) # any character\n (\\*)\n ($|[ -']|[+-/]|[:-?]|\\}|\\)|\\]|\\||\\^|~)\n \"\"\"\n\ndef get_user_full_name(user: ZerverFieldsT) -> str:\n if user['deleted'] is False:\n if user['real_name'] == '':\n return user['name']\n else:\n return user['real_name']\n else:\n return user['name']\n\n# Markdown mapping\ndef convert_to_zulip_markdown(text: str, users: List[ZerverFieldsT],\n added_users: AddedUsersT) -> Tuple[str, List[int], bool]:\n mentioned_users_id = []\n text = convert_markdown_syntax(text, SLACK_BOLD_REGEX, \"**\")\n text = convert_markdown_syntax(text, SLACK_STRIKETHROUGH_REGEX, \"~~\")\n text = convert_markdown_syntax(text, SLACK_ITALIC_REGEX, \"*\")\n\n # Map Slack's mention all: '<!everyone>' to '@**all** '\n # No regex for this as it can be present anywhere in the sentence\n text = text.replace('<!everyone>', '@**all**')\n\n tokens = text.split(' ')\n for iterator in range(len(tokens)):\n\n # Check user mentions and change mention format from\n # '<@slack_id|short_name>' to '@**full_name**'\n if (re.findall(SLACK_USERMENTION_REGEX, tokens[iterator], re.VERBOSE)):\n tokens[iterator], user_id = get_user_mentions(tokens[iterator],\n users, added_users)\n if user_id is not None:\n mentioned_users_id.append(user_id)\n\n text = ' '.join(tokens)\n\n # Check and convert link format\n text, has_link = convert_link_format(text)\n # convert `<mailto:[email protected]>` to `mailto:[email protected]`\n text, has_mailto_link = convert_mailto_format(text)\n\n if has_link is True or has_mailto_link is True:\n message_has_link = True\n else:\n message_has_link = False\n\n return text, mentioned_users_id, message_has_link\n\ndef get_user_mentions(token: str, users: List[ZerverFieldsT],\n added_users: AddedUsersT) -> Tuple[str, int]:\n slack_usermention_match = re.search(SLACK_USERMENTION_REGEX, token, re.VERBOSE)\n short_name = slack_usermention_match.group(4)\n slack_id = slack_usermention_match.group(2)\n for user in users:\n if (user['id'] == slack_id and user['name'] == short_name and short_name) or \\\n (user['id'] == slack_id and short_name is None):\n full_name = get_user_full_name(user)\n user_id = added_users[slack_id]\n mention = \"@**\" + full_name + \"**\"\n token = re.sub(SLACK_USERMENTION_REGEX, mention, token, flags=re.VERBOSE)\n return token, user_id\n return token, None\n\n# Map italic, bold and strikethrough markdown\ndef convert_markdown_syntax(text: str, regex: str, zulip_keyword: str) -> str:\n \"\"\"\n Returns:\n 1. For strikethrough formatting: This maps Slack's '~strike~' to Zulip's '~~strike~~'\n 2. For bold formatting: This maps Slack's '*bold*' to Zulip's '**bold**'\n 3. 
For italic formatting: This maps Slack's '_italic_' to Zulip's '*italic*'\n \"\"\"\n for match in re.finditer(regex, text, re.VERBOSE):\n converted_token = (match.group(1) + zulip_keyword + match.group(3)\n + match.group(4) + zulip_keyword + match.group(6))\n text = text.replace(match.group(0), converted_token)\n return text\n\ndef convert_link_format(text: str) -> Tuple[str, bool]:\n \"\"\"\n 1. Converts '<https://foo.com>' to 'https://foo.com'\n 2. Converts '<https://foo.com|foo>' to 'https://foo.com|foo'\n \"\"\"\n has_link = False\n for match in re.finditer(LINK_REGEX, text, re.VERBOSE):\n converted_text = match.group(0).replace('>', '').replace('<', '')\n has_link = True\n text = text.replace(match.group(0), converted_text)\n return text, has_link\n\ndef convert_mailto_format(text: str) -> Tuple[str, bool]:\n \"\"\"\n 1. Converts '<mailto:[email protected]>' to 'mailto:[email protected]'\n 2. Converts '<mailto:[email protected]|[email protected]>' to 'mailto:[email protected]'\n \"\"\"\n has_link = False\n for match in re.finditer(SLACK_MAILTO_REGEX, text, re.VERBOSE):\n has_link = True\n text = text.replace(match.group(0), match.group(1))\n return text, has_link\n", "path": "zerver/lib/slack_message_conversion.py"}]}
| 2,798 | 208 |
gh_patches_debug_2668
|
rasdani/github-patches
|
git_diff
|
facebookresearch__ParlAI-1821
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Obselete download link for CLEVR Dataset
Apparently, the current link to CLEVR in the source code is "https://s3-us-west-1.amazonaws.com/clevr/CLEVR_v1.0.zip", which returns the message "All access to this object has been disabled"
When I try to execute the following line of code
`!python ~/ParlAI/examples/display_data.py -t clevr`
I obtain
```
[creating task(s): clevr]
[building data: /root/ParlAI/data/CLEVR]
[ downloading: https://s3-us-west-1.amazonaws.com/clevr/CLEVR_v1.0.zip to /root/ParlAI/data/CLEVR/CLEVR_v1.0.zip ]
Downloading CLEVR_v1.0.zip: 0.00B [00:00, ?B/s]
unpacking CLEVR_v1.0.zip
Traceback (most recent call last):
File "/root/ParlAI/parlai/core/agents.py", line 819, in _create_task_agents
task_agents = my_module.create_agent(opt)
AttributeError: module 'parlai.tasks.clevr.agents' has no attribute 'create_agent'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/root/ParlAI/examples/display_data.py", line 22, in <module>
display_data(opt)
File "/root/ParlAI/parlai/scripts/display_data.py", line 42, in display_data
world = create_task(opt, agent)
File "/root/ParlAI/parlai/core/worlds.py", line 1151, in create_task
world = create_task_world(opt, user_agents, default_world=default_world)
File "/root/ParlAI/parlai/core/worlds.py", line 1108, in create_task_world
opt, user_agents, default_world=default_world
File "/root/ParlAI/parlai/core/worlds.py", line 1068, in _get_task_world
task_agents = _create_task_agents(opt)
File "/root/ParlAI/parlai/core/agents.py", line 822, in _create_task_agents
return create_task_agent_from_taskname(opt)
File "/root/ParlAI/parlai/core/agents.py", line 776, in create_task_agent_from_taskname
task_agents = teacher_class(opt)
File "/root/ParlAI/parlai/tasks/clevr/agents.py", line 45, in __init__
data_path, self.images_path = _path(opt)
File "/root/ParlAI/parlai/tasks/clevr/agents.py", line 15, in _path
build(opt)
File "/root/ParlAI/parlai/tasks/clevr/build.py", line 28, in build
build_data.untar(dpath, fname)
File "/root/ParlAI/parlai/core/build_data.py", line 180, in untar
shutil.unpack_archive(fullpath, path)
File "/usr/lib/python3.6/shutil.py", line 983, in unpack_archive
func(filename, extract_dir, **kwargs)
File "/usr/lib/python3.6/shutil.py", line 883, in _unpack_zipfile
raise ReadError("%s is not a zip file" % filename)
shutil.ReadError: /root/ParlAI/data/CLEVR/CLEVR_v1.0.zip is not a zip file
```
I found the following working link on the CLEVR webpage (https://cs.stanford.edu/people/jcjohns/clevr/):
https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip
</issue>
<code>
[start of parlai/tasks/clevr/build.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6 # Download and build the data if it does not exist.
7
8 import parlai.core.build_data as build_data
9 import os
10
11
12 def build(opt):
13 dpath = os.path.join(opt['datapath'], 'CLEVR')
14 version = 'v1.0'
15
16 if not build_data.built(dpath, version_string=version):
17 print('[building data: ' + dpath + ']')
18 # An older version exists, so remove these outdated files.
19 if build_data.built(dpath):
20 build_data.remove_dir(dpath)
21 build_data.make_dir(dpath)
22
23 # Download the data.
24 fname = 'CLEVR_v1.0.zip'
25 url = 'https://s3-us-west-1.amazonaws.com/clevr/'
26
27 build_data.download(url + fname, dpath, fname)
28 build_data.untar(dpath, fname)
29
30 # Mark the data as built.
31 build_data.mark_done(dpath, version_string=version)
32
[end of parlai/tasks/clevr/build.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parlai/tasks/clevr/build.py b/parlai/tasks/clevr/build.py
--- a/parlai/tasks/clevr/build.py
+++ b/parlai/tasks/clevr/build.py
@@ -22,7 +22,7 @@
# Download the data.
fname = 'CLEVR_v1.0.zip'
- url = 'https://s3-us-west-1.amazonaws.com/clevr/'
+ url = 'https://dl.fbaipublicfiles.com/clevr/'
build_data.download(url + fname, dpath, fname)
build_data.untar(dpath, fname)
|
{"golden_diff": "diff --git a/parlai/tasks/clevr/build.py b/parlai/tasks/clevr/build.py\n--- a/parlai/tasks/clevr/build.py\n+++ b/parlai/tasks/clevr/build.py\n@@ -22,7 +22,7 @@\n \n # Download the data.\n fname = 'CLEVR_v1.0.zip'\n- url = 'https://s3-us-west-1.amazonaws.com/clevr/'\n+ url = 'https://dl.fbaipublicfiles.com/clevr/'\n \n build_data.download(url + fname, dpath, fname)\n build_data.untar(dpath, fname)\n", "issue": "Obselete download link for CLEVR Dataset\nApparently, the current link to CLEVR in the source code is \"https://s3-us-west-1.amazonaws.com/clevr/CLEVR_v1.0.zip\" that returns the message \"All access to this object has been disabled\"\r\n\r\nWhen I try to execute the following line of code\r\n\r\n`!python ~/ParlAI/examples/display_data.py -t clevr`\r\n\r\nI obtain\r\n\r\n```\r\n[creating task(s): clevr]\r\n[building data: /root/ParlAI/data/CLEVR]\r\n[ downloading: https://s3-us-west-1.amazonaws.com/clevr/CLEVR_v1.0.zip to /root/ParlAI/data/CLEVR/CLEVR_v1.0.zip ]\r\nDownloading CLEVR_v1.0.zip: 0.00B [00:00, ?B/s]\r\nunpacking CLEVR_v1.0.zip\r\nTraceback (most recent call last):\r\n File \"/root/ParlAI/parlai/core/agents.py\", line 819, in _create_task_agents\r\n task_agents = my_module.create_agent(opt)\r\nAttributeError: module 'parlai.tasks.clevr.agents' has no attribute 'create_agent'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/root/ParlAI/examples/display_data.py\", line 22, in <module>\r\n display_data(opt)\r\n File \"/root/ParlAI/parlai/scripts/display_data.py\", line 42, in display_data\r\n world = create_task(opt, agent)\r\n File \"/root/ParlAI/parlai/core/worlds.py\", line 1151, in create_task\r\n world = create_task_world(opt, user_agents, default_world=default_world)\r\n File \"/root/ParlAI/parlai/core/worlds.py\", line 1108, in create_task_world\r\n opt, user_agents, default_world=default_world\r\n File \"/root/ParlAI/parlai/core/worlds.py\", line 1068, in _get_task_world\r\n task_agents = _create_task_agents(opt)\r\n File \"/root/ParlAI/parlai/core/agents.py\", line 822, in _create_task_agents\r\n return create_task_agent_from_taskname(opt)\r\n File \"/root/ParlAI/parlai/core/agents.py\", line 776, in create_task_agent_from_taskname\r\n task_agents = teacher_class(opt)\r\n File \"/root/ParlAI/parlai/tasks/clevr/agents.py\", line 45, in __init__\r\n data_path, self.images_path = _path(opt)\r\n File \"/root/ParlAI/parlai/tasks/clevr/agents.py\", line 15, in _path\r\n build(opt)\r\n File \"/root/ParlAI/parlai/tasks/clevr/build.py\", line 28, in build\r\n build_data.untar(dpath, fname)\r\n File \"/root/ParlAI/parlai/core/build_data.py\", line 180, in untar\r\n shutil.unpack_archive(fullpath, path)\r\n File \"/usr/lib/python3.6/shutil.py\", line 983, in unpack_archive\r\n func(filename, extract_dir, **kwargs)\r\n File \"/usr/lib/python3.6/shutil.py\", line 883, in _unpack_zipfile\r\n raise ReadError(\"%s is not a zip file\" % filename)\r\nshutil.ReadError: /root/ParlAI/data/CLEVR/CLEVR_v1.0.zip is not a zip file\r\n```\r\n\r\nI found the following working link on CLEVR webpage (https://cs.stanford.edu/people/jcjohns/clevr/):\r\n\r\nhttps://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n# Download and build the data if it does not exist.\n\nimport parlai.core.build_data as build_data\nimport os\n\n\ndef build(opt):\n dpath = os.path.join(opt['datapath'], 'CLEVR')\n version = 'v1.0'\n\n if not build_data.built(dpath, version_string=version):\n print('[building data: ' + dpath + ']')\n # An older version exists, so remove these outdated files.\n if build_data.built(dpath):\n build_data.remove_dir(dpath)\n build_data.make_dir(dpath)\n\n # Download the data.\n fname = 'CLEVR_v1.0.zip'\n url = 'https://s3-us-west-1.amazonaws.com/clevr/'\n\n build_data.download(url + fname, dpath, fname)\n build_data.untar(dpath, fname)\n\n # Mark the data as built.\n build_data.mark_done(dpath, version_string=version)\n", "path": "parlai/tasks/clevr/build.py"}]}
| 1,707 | 145 |
gh_patches_debug_33339
|
rasdani/github-patches
|
git_diff
|
pyinstaller__pyinstaller-2111
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
New numpy hook for intel mkl libraries
Hello - anaconda and winpython build numpy against Intel's MKL libraries. Building something that uses numpy will need to have those libraries.
Here is a winpython hook:
http://stackoverflow.com/a/35853001
and I adapted that for anaconda & python3:
https://github.com/maqifrnswa/scimpy/blob/master/pyinstaller-hooks/hook-numpy.py
</issue>
<code>
[start of PyInstaller/hooks/hook-numpy.core.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2013-2016, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9 # On Windows, numpy depends on a set of dynamically-detemined DLLs, which means
10 # that PyInstaller's static analysis can't find them. See https://github.com/pyinstaller/pyinstaller/issues/1969
11 # for more information. The typical error message: ``Intel MKL FATAL ERROR:
12 # Cannot load mkl_intel_thread.dll.``
13 #
14 # So, include them manually.
15 import os
16 import os.path
17 from PyInstaller.utils.hooks import get_package_paths
18
19 pkg_base, pkg_dir = get_package_paths('numpy.core')
20 # Walk through all files in ``numpy.core``, looking for DLLs.
21 datas = []
22 for f in os.listdir(pkg_dir):
23 extension = os.path.splitext(f)[1]
24 if extension == '.dll':
25 # Produce the tuple ('/abs/path/to/libs/numpy/core/file.dll', '')
26 source = os.path.join(pkg_dir, f)
27 datas.append((source, ''))
28
[end of PyInstaller/hooks/hook-numpy.core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/PyInstaller/hooks/hook-numpy.core.py b/PyInstaller/hooks/hook-numpy.core.py
--- a/PyInstaller/hooks/hook-numpy.core.py
+++ b/PyInstaller/hooks/hook-numpy.core.py
@@ -6,22 +6,41 @@
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-# On Windows, numpy depends on a set of dynamically-detemined DLLs, which means
-# that PyInstaller's static analysis can't find them. See https://github.com/pyinstaller/pyinstaller/issues/1969
-# for more information. The typical error message: ``Intel MKL FATAL ERROR:
-# Cannot load mkl_intel_thread.dll.``
+# If numpy is built with MKL support it depends on a set of libraries loaded
+# at runtime. Since PyInstaller's static analysis can't find them they must be
+# included manually.
#
-# So, include them manually.
+# See
+# https://github.com/pyinstaller/pyinstaller/issues/1881
+# https://github.com/pyinstaller/pyinstaller/issues/1969
+# for more information
import os
import os.path
+import re
from PyInstaller.utils.hooks import get_package_paths
+from PyInstaller import log as logging
+from PyInstaller import compat
+binaries = []
+
+# look for libraries in numpy package path
pkg_base, pkg_dir = get_package_paths('numpy.core')
-# Walk through all files in ``numpy.core``, looking for DLLs.
-datas = []
-for f in os.listdir(pkg_dir):
- extension = os.path.splitext(f)[1]
- if extension == '.dll':
- # Produce the tuple ('/abs/path/to/libs/numpy/core/file.dll', '')
- source = os.path.join(pkg_dir, f)
- datas.append((source, ''))
+re_anylib = re.compile(r'\w+\.(?:dll|so)', re.IGNORECASE)
+dlls_pkg = [f for f in os.listdir(pkg_dir) if re_anylib.match(f)]
+binaries += [(os.path.join(pkg_dir, f), '') for f in dlls_pkg]
+
+# look for MKL libraries in pythons lib directory
+# TODO: check numpy.__config__ if numpy is actually depending on MKL
+# TODO: determine which directories are searched by the os linker
+if compat.is_win:
+ lib_dir = os.path.join(compat.base_prefix, "Library", "bin")
+else:
+ lib_dir = os.path.join(compat.base_prefix, "lib")
+if os.path.isdir(lib_dir):
+ re_mkllib = re.compile(r'^(?:lib)?mkl\w+\.(?:dll|so)', re.IGNORECASE)
+ dlls_mkl = [f for f in os.listdir(lib_dir) if re_mkllib.match(f)]
+ if dlls_mkl:
+ logger = logging.getLogger(__name__)
+ logger.info("MKL libraries found when importing numpy. Adding MKL to binaries")
+ binaries += [(os.path.join(lib_dir, f), '') for f in dlls_mkl]
+
|
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-numpy.core.py b/PyInstaller/hooks/hook-numpy.core.py\n--- a/PyInstaller/hooks/hook-numpy.core.py\n+++ b/PyInstaller/hooks/hook-numpy.core.py\n@@ -6,22 +6,41 @@\n #\n # The full license is in the file COPYING.txt, distributed with this software.\n #-----------------------------------------------------------------------------\n-# On Windows, numpy depends on a set of dynamically-detemined DLLs, which means\n-# that PyInstaller's static analysis can't find them. See https://github.com/pyinstaller/pyinstaller/issues/1969\n-# for more information. The typical error message: ``Intel MKL FATAL ERROR:\n-# Cannot load mkl_intel_thread.dll.``\n+# If numpy is built with MKL support it depends on a set of libraries loaded\n+# at runtime. Since PyInstaller's static analysis can't find them they must be\n+# included manually.\n #\n-# So, include them manually.\n+# See\n+# https://github.com/pyinstaller/pyinstaller/issues/1881\n+# https://github.com/pyinstaller/pyinstaller/issues/1969\n+# for more information\n import os\n import os.path\n+import re\n from PyInstaller.utils.hooks import get_package_paths\n+from PyInstaller import log as logging \n+from PyInstaller import compat\n \n+binaries = []\n+\n+# look for libraries in numpy package path\n pkg_base, pkg_dir = get_package_paths('numpy.core')\n-# Walk through all files in ``numpy.core``, looking for DLLs.\n-datas = []\n-for f in os.listdir(pkg_dir):\n- extension = os.path.splitext(f)[1]\n- if extension == '.dll':\n- # Produce the tuple ('/abs/path/to/libs/numpy/core/file.dll', '')\n- source = os.path.join(pkg_dir, f)\n- datas.append((source, ''))\n+re_anylib = re.compile(r'\\w+\\.(?:dll|so)', re.IGNORECASE)\n+dlls_pkg = [f for f in os.listdir(pkg_dir) if re_anylib.match(f)]\n+binaries += [(os.path.join(pkg_dir, f), '') for f in dlls_pkg]\n+\n+# look for MKL libraries in pythons lib directory\n+# TODO: check numpy.__config__ if numpy is actually depending on MKL\n+# TODO: determine which directories are searched by the os linker\n+if compat.is_win:\n+ lib_dir = os.path.join(compat.base_prefix, \"Library\", \"bin\")\n+else:\n+ lib_dir = os.path.join(compat.base_prefix, \"lib\")\n+if os.path.isdir(lib_dir):\n+ re_mkllib = re.compile(r'^(?:lib)?mkl\\w+\\.(?:dll|so)', re.IGNORECASE)\n+ dlls_mkl = [f for f in os.listdir(lib_dir) if re_mkllib.match(f)]\n+ if dlls_mkl:\n+ logger = logging.getLogger(__name__)\n+ logger.info(\"MKL libraries found when importing numpy. Adding MKL to binaries\")\n+ binaries += [(os.path.join(lib_dir, f), '') for f in dlls_mkl]\n+\n", "issue": "New numpy hook for intel mkl libraries\nHello - anaconda and winpython build numpy against Intel's mkl libraries. 
Building someone that uses numpy will need to have those libraries.\n\nHere is a winpython hook:\nhttp://stackoverflow.com/a/35853001\n\nand I adapted that for anaconda & python3:\nhttps://github.com/maqifrnswa/scimpy/blob/master/pyinstaller-hooks/hook-numpy.py\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n# On Windows, numpy depends on a set of dynamically-detemined DLLs, which means\n# that PyInstaller's static analysis can't find them. See https://github.com/pyinstaller/pyinstaller/issues/1969\n# for more information. The typical error message: ``Intel MKL FATAL ERROR:\n# Cannot load mkl_intel_thread.dll.``\n#\n# So, include them manually.\nimport os\nimport os.path\nfrom PyInstaller.utils.hooks import get_package_paths\n\npkg_base, pkg_dir = get_package_paths('numpy.core')\n# Walk through all files in ``numpy.core``, looking for DLLs.\ndatas = []\nfor f in os.listdir(pkg_dir):\n extension = os.path.splitext(f)[1]\n if extension == '.dll':\n # Produce the tuple ('/abs/path/to/libs/numpy/core/file.dll', '')\n source = os.path.join(pkg_dir, f)\n datas.append((source, ''))\n", "path": "PyInstaller/hooks/hook-numpy.core.py"}]}
| 946 | 679 |
gh_patches_debug_40264
|
rasdani/github-patches
|
git_diff
|
tensorflow__addons-2381
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ideally, the number of bins in histogram equalization would be variable rather than limited to 256
https://github.com/tensorflow/addons/blob/d26e2ed5f68092aed57016a7005ce534b1be3dce/tensorflow_addons/image/color_ops.py#L36
</issue>
<code>
[start of tensorflow_addons/image/color_ops.py]
1 # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Color operations.
16 equalize: Equalizes image histogram
17 sharpness: Sharpen image
18 """
19
20 import tensorflow as tf
21
22 from tensorflow_addons.utils.types import TensorLike, Number
23 from tensorflow_addons.image.utils import to_4D_image, from_4D_image
24 from tensorflow_addons.image.compose_ops import blend
25
26 from typing import Optional
27 from functools import partial
28
29
30 def _scale_channel(image: TensorLike, channel: int) -> tf.Tensor:
31 """Scale the data in the channel to implement equalize."""
32 image_dtype = image.dtype
33 image = tf.cast(image[:, :, channel], tf.int32)
34
35 # Compute the histogram of the image channel.
36 histo = tf.histogram_fixed_width(image, [0, 255], nbins=256)
37
38 # For the purposes of computing the step, filter out the nonzeros.
39 nonzero_histo = tf.boolean_mask(histo, histo != 0)
40 step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
41
42 # If step is zero, return the original image. Otherwise, build
43 # lut from the full histogram and step and then index from it.
44 if step == 0:
45 result = image
46 else:
47 lut_values = (tf.cumsum(histo, exclusive=True) + (step // 2)) // step
48 lut_values = tf.clip_by_value(lut_values, 0, 255)
49 result = tf.gather(lut_values, image)
50
51 return tf.cast(result, image_dtype)
52
53
54 def _equalize_image(image: TensorLike) -> tf.Tensor:
55 """Implements Equalize function from PIL using TF ops."""
56 image = tf.stack([_scale_channel(image, c) for c in range(image.shape[-1])], -1)
57 return image
58
59
60 @tf.function
61 def equalize(image: TensorLike, name: Optional[str] = None) -> tf.Tensor:
62 """Equalize image(s)
63
64 Args:
65 images: A tensor of shape
66 `(num_images, num_rows, num_columns, num_channels)` (NHWC), or
67 `(num_rows, num_columns, num_channels)` (HWC), or
68 `(num_rows, num_columns)` (HW). The rank must be statically known (the
69 shape is not `TensorShape(None)`).
70 name: The name of the op.
71 Returns:
72 Image(s) with the same type and shape as `images`, equalized.
73 """
74 with tf.name_scope(name or "equalize"):
75 image_dims = tf.rank(image)
76 image = to_4D_image(image)
77 fn = partial(_equalize_image)
78 image = tf.map_fn(fn, image)
79 return from_4D_image(image, image_dims)
80
81
82 def _sharpness_image(image: TensorLike, factor: Number) -> tf.Tensor:
83 """Implements Sharpness function from PIL using TF ops."""
84 orig_image = image
85 image_dtype = image.dtype
86 image_channels = image.shape[-1]
87 image = tf.cast(image, tf.float32)
88
89 # SMOOTH PIL Kernel.
90 kernel = (
91 tf.constant(
92 [[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32, shape=[3, 3, 1, 1]
93 )
94 / 13.0
95 )
96 kernel = tf.tile(kernel, [1, 1, image_channels, 1])
97
98 # Apply kernel channel-wise.
99 degenerate = tf.nn.depthwise_conv2d(
100 image, kernel, strides=[1, 1, 1, 1], padding="VALID", dilations=[1, 1]
101 )
102 degenerate = tf.cast(degenerate, image_dtype)
103
104 # For the borders of the resulting image, fill in the values of the original image.
105 mask = tf.ones_like(degenerate)
106 padded_mask = tf.pad(mask, [[0, 0], [1, 1], [1, 1], [0, 0]])
107 padded_degenerate = tf.pad(degenerate, [[0, 0], [1, 1], [1, 1], [0, 0]])
108 result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
109
110 # Blend the final result.
111 blended = blend(result, orig_image, factor)
112 return tf.cast(blended, image_dtype)
113
114
115 @tf.function
116 def sharpness(
117 image: TensorLike, factor: Number, name: Optional[str] = None
118 ) -> tf.Tensor:
119 """Change sharpness of image(s).
120
121 Args:
122 image: A tensor of shape
123 `(num_images, num_rows, num_columns, num_channels)` (NHWC), or
124 `(num_rows, num_columns, num_channels)` (HWC)
125 factor: A floating point value or Tensor above 0.0.
126 name: The name of the op.
127 Returns:
128 Image(s) with the same type and shape as `images`, sharper.
129 """
130 with tf.name_scope(name or "sharpness"):
131 image_dims = tf.rank(image)
132 image = to_4D_image(image)
133 image = _sharpness_image(image, factor=factor)
134 return from_4D_image(image, image_dims)
135
[end of tensorflow_addons/image/color_ops.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tensorflow_addons/image/color_ops.py b/tensorflow_addons/image/color_ops.py
--- a/tensorflow_addons/image/color_ops.py
+++ b/tensorflow_addons/image/color_ops.py
@@ -27,17 +27,17 @@
from functools import partial
-def _scale_channel(image: TensorLike, channel: int) -> tf.Tensor:
+def _scale_channel(image: TensorLike, channel: int, bins: int = 256) -> tf.Tensor:
"""Scale the data in the channel to implement equalize."""
image_dtype = image.dtype
image = tf.cast(image[:, :, channel], tf.int32)
# Compute the histogram of the image channel.
- histo = tf.histogram_fixed_width(image, [0, 255], nbins=256)
+ histo = tf.histogram_fixed_width(image, [0, bins - 1], nbins=bins)
# For the purposes of computing the step, filter out the nonzeros.
nonzero_histo = tf.boolean_mask(histo, histo != 0)
- step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
+ step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // (bins - 1)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
@@ -45,20 +45,24 @@
result = image
else:
lut_values = (tf.cumsum(histo, exclusive=True) + (step // 2)) // step
- lut_values = tf.clip_by_value(lut_values, 0, 255)
+ lut_values = tf.clip_by_value(lut_values, 0, bins - 1)
result = tf.gather(lut_values, image)
return tf.cast(result, image_dtype)
-def _equalize_image(image: TensorLike) -> tf.Tensor:
+def _equalize_image(image: TensorLike, bins: int = 256) -> tf.Tensor:
"""Implements Equalize function from PIL using TF ops."""
- image = tf.stack([_scale_channel(image, c) for c in range(image.shape[-1])], -1)
+ image = tf.stack(
+ [_scale_channel(image, c, bins) for c in range(image.shape[-1])], -1
+ )
return image
@tf.function
-def equalize(image: TensorLike, name: Optional[str] = None) -> tf.Tensor:
+def equalize(
+ image: TensorLike, bins: int = 256, name: Optional[str] = None
+) -> tf.Tensor:
"""Equalize image(s)
Args:
@@ -67,6 +71,7 @@
`(num_rows, num_columns, num_channels)` (HWC), or
`(num_rows, num_columns)` (HW). The rank must be statically known (the
shape is not `TensorShape(None)`).
+ bins: The number of bins in the histogram.
name: The name of the op.
Returns:
Image(s) with the same type and shape as `images`, equalized.
@@ -75,7 +80,7 @@
image_dims = tf.rank(image)
image = to_4D_image(image)
fn = partial(_equalize_image)
- image = tf.map_fn(fn, image)
+ image = tf.map_fn(lambda x: fn(x, bins), image)
return from_4D_image(image, image_dims)
|
{"golden_diff": "diff --git a/tensorflow_addons/image/color_ops.py b/tensorflow_addons/image/color_ops.py\n--- a/tensorflow_addons/image/color_ops.py\n+++ b/tensorflow_addons/image/color_ops.py\n@@ -27,17 +27,17 @@\n from functools import partial\n \n \n-def _scale_channel(image: TensorLike, channel: int) -> tf.Tensor:\n+def _scale_channel(image: TensorLike, channel: int, bins: int = 256) -> tf.Tensor:\n \"\"\"Scale the data in the channel to implement equalize.\"\"\"\n image_dtype = image.dtype\n image = tf.cast(image[:, :, channel], tf.int32)\n \n # Compute the histogram of the image channel.\n- histo = tf.histogram_fixed_width(image, [0, 255], nbins=256)\n+ histo = tf.histogram_fixed_width(image, [0, bins - 1], nbins=bins)\n \n # For the purposes of computing the step, filter out the nonzeros.\n nonzero_histo = tf.boolean_mask(histo, histo != 0)\n- step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255\n+ step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // (bins - 1)\n \n # If step is zero, return the original image. Otherwise, build\n # lut from the full histogram and step and then index from it.\n@@ -45,20 +45,24 @@\n result = image\n else:\n lut_values = (tf.cumsum(histo, exclusive=True) + (step // 2)) // step\n- lut_values = tf.clip_by_value(lut_values, 0, 255)\n+ lut_values = tf.clip_by_value(lut_values, 0, bins - 1)\n result = tf.gather(lut_values, image)\n \n return tf.cast(result, image_dtype)\n \n \n-def _equalize_image(image: TensorLike) -> tf.Tensor:\n+def _equalize_image(image: TensorLike, bins: int = 256) -> tf.Tensor:\n \"\"\"Implements Equalize function from PIL using TF ops.\"\"\"\n- image = tf.stack([_scale_channel(image, c) for c in range(image.shape[-1])], -1)\n+ image = tf.stack(\n+ [_scale_channel(image, c, bins) for c in range(image.shape[-1])], -1\n+ )\n return image\n \n \n @tf.function\n-def equalize(image: TensorLike, name: Optional[str] = None) -> tf.Tensor:\n+def equalize(\n+ image: TensorLike, bins: int = 256, name: Optional[str] = None\n+) -> tf.Tensor:\n \"\"\"Equalize image(s)\n \n Args:\n@@ -67,6 +71,7 @@\n `(num_rows, num_columns, num_channels)` (HWC), or\n `(num_rows, num_columns)` (HW). The rank must be statically known (the\n shape is not `TensorShape(None)`).\n+ bins: The number of bins in the histogram.\n name: The name of the op.\n Returns:\n Image(s) with the same type and shape as `images`, equalized.\n@@ -75,7 +80,7 @@\n image_dims = tf.rank(image)\n image = to_4D_image(image)\n fn = partial(_equalize_image)\n- image = tf.map_fn(fn, image)\n+ image = tf.map_fn(lambda x: fn(x, bins), image)\n return from_4D_image(image, image_dims)\n", "issue": "Ideally the bins in histogram equalization is variable rather than limited to 256 \nhttps://github.com/tensorflow/addons/blob/d26e2ed5f68092aed57016a7005ce534b1be3dce/tensorflow_addons/image/color_ops.py#L36\n", "before_files": [{"content": "# Copyright 2020 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Color operations.\n equalize: Equalizes image histogram\n sharpness: Sharpen image\n\"\"\"\n\nimport tensorflow as tf\n\nfrom tensorflow_addons.utils.types import TensorLike, Number\nfrom tensorflow_addons.image.utils import to_4D_image, from_4D_image\nfrom tensorflow_addons.image.compose_ops import blend\n\nfrom typing import Optional\nfrom functools import partial\n\n\ndef _scale_channel(image: TensorLike, channel: int) -> tf.Tensor:\n \"\"\"Scale the data in the channel to implement equalize.\"\"\"\n image_dtype = image.dtype\n image = tf.cast(image[:, :, channel], tf.int32)\n\n # Compute the histogram of the image channel.\n histo = tf.histogram_fixed_width(image, [0, 255], nbins=256)\n\n # For the purposes of computing the step, filter out the nonzeros.\n nonzero_histo = tf.boolean_mask(histo, histo != 0)\n step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255\n\n # If step is zero, return the original image. Otherwise, build\n # lut from the full histogram and step and then index from it.\n if step == 0:\n result = image\n else:\n lut_values = (tf.cumsum(histo, exclusive=True) + (step // 2)) // step\n lut_values = tf.clip_by_value(lut_values, 0, 255)\n result = tf.gather(lut_values, image)\n\n return tf.cast(result, image_dtype)\n\n\ndef _equalize_image(image: TensorLike) -> tf.Tensor:\n \"\"\"Implements Equalize function from PIL using TF ops.\"\"\"\n image = tf.stack([_scale_channel(image, c) for c in range(image.shape[-1])], -1)\n return image\n\n\[email protected]\ndef equalize(image: TensorLike, name: Optional[str] = None) -> tf.Tensor:\n \"\"\"Equalize image(s)\n\n Args:\n images: A tensor of shape\n `(num_images, num_rows, num_columns, num_channels)` (NHWC), or\n `(num_rows, num_columns, num_channels)` (HWC), or\n `(num_rows, num_columns)` (HW). 
The rank must be statically known (the\n shape is not `TensorShape(None)`).\n name: The name of the op.\n Returns:\n Image(s) with the same type and shape as `images`, equalized.\n \"\"\"\n with tf.name_scope(name or \"equalize\"):\n image_dims = tf.rank(image)\n image = to_4D_image(image)\n fn = partial(_equalize_image)\n image = tf.map_fn(fn, image)\n return from_4D_image(image, image_dims)\n\n\ndef _sharpness_image(image: TensorLike, factor: Number) -> tf.Tensor:\n \"\"\"Implements Sharpness function from PIL using TF ops.\"\"\"\n orig_image = image\n image_dtype = image.dtype\n image_channels = image.shape[-1]\n image = tf.cast(image, tf.float32)\n\n # SMOOTH PIL Kernel.\n kernel = (\n tf.constant(\n [[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32, shape=[3, 3, 1, 1]\n )\n / 13.0\n )\n kernel = tf.tile(kernel, [1, 1, image_channels, 1])\n\n # Apply kernel channel-wise.\n degenerate = tf.nn.depthwise_conv2d(\n image, kernel, strides=[1, 1, 1, 1], padding=\"VALID\", dilations=[1, 1]\n )\n degenerate = tf.cast(degenerate, image_dtype)\n\n # For the borders of the resulting image, fill in the values of the original image.\n mask = tf.ones_like(degenerate)\n padded_mask = tf.pad(mask, [[0, 0], [1, 1], [1, 1], [0, 0]])\n padded_degenerate = tf.pad(degenerate, [[0, 0], [1, 1], [1, 1], [0, 0]])\n result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)\n\n # Blend the final result.\n blended = blend(result, orig_image, factor)\n return tf.cast(blended, image_dtype)\n\n\[email protected]\ndef sharpness(\n image: TensorLike, factor: Number, name: Optional[str] = None\n) -> tf.Tensor:\n \"\"\"Change sharpness of image(s).\n\n Args:\n image: A tensor of shape\n `(num_images, num_rows, num_columns, num_channels)` (NHWC), or\n `(num_rows, num_columns, num_channels)` (HWC)\n factor: A floating point value or Tensor above 0.0.\n name: The name of the op.\n Returns:\n Image(s) with the same type and shape as `images`, sharper.\n \"\"\"\n with tf.name_scope(name or \"sharpness\"):\n image_dims = tf.rank(image)\n image = to_4D_image(image)\n image = _sharpness_image(image, factor=factor)\n return from_4D_image(image, image_dims)\n", "path": "tensorflow_addons/image/color_ops.py"}]}
| 2,213 | 801 |
gh_patches_debug_38724
|
rasdani/github-patches
|
git_diff
|
Qiskit__qiskit-3792
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Scheduler should be supported in execute pipeline
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. -->
### What is the expected enhancement?
It should be configurable to schedule your circuit into pulses during execute.
</issue>
<code>
[start of qiskit/execute.py]
1 # -*- coding: utf-8 -*-
2
3 # This code is part of Qiskit.
4 #
5 # (C) Copyright IBM 2017, 2019.
6 #
7 # This code is licensed under the Apache License, Version 2.0. You may
8 # obtain a copy of this license in the LICENSE.txt file in the root directory
9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
10 #
11 # Any modifications or derivative works of this code must retain this
12 # copyright notice, and modified files need to carry a notice indicating
13 # that they have been altered from the originals.
14
15 """
16 =============================================
17 Executing Experiments (:mod:`qiskit.execute`)
18 =============================================
19
20 .. currentmodule:: qiskit.execute
21
22 .. autofunction:: execute
23 """
24 from qiskit.compiler import transpile, assemble
25 from qiskit.qobj.utils import MeasLevel, MeasReturnType
26
27
28 def execute(experiments, backend,
29 basis_gates=None, coupling_map=None, # circuit transpile options
30 backend_properties=None, initial_layout=None,
31 seed_transpiler=None, optimization_level=None, pass_manager=None,
32 qobj_id=None, qobj_header=None, shots=1024, # common run options
33 memory=False, max_credits=10, seed_simulator=None,
34 default_qubit_los=None, default_meas_los=None, # schedule run options
35 schedule_los=None, meas_level=MeasLevel.CLASSIFIED,
36 meas_return=MeasReturnType.AVERAGE,
37 memory_slots=None, memory_slot_size=100, rep_time=None, parameter_binds=None,
38 **run_config):
39 """Execute a list of :class:`qiskit.circuit.QuantumCircuit` or
40 :class:`qiskit.pulse.Schedule` on a backend.
41
42 The execution is asynchronous, and a handle to a job instance is returned.
43
44 Args:
45 experiments (QuantumCircuit or list[QuantumCircuit] or Schedule or list[Schedule]):
46 Circuit(s) or pulse schedule(s) to execute
47
48 backend (BaseBackend):
49 Backend to execute circuits on.
50 Transpiler options are automatically grabbed from
51 backend.configuration() and backend.properties().
52 If any other option is explicitly set (e.g. coupling_map), it
53 will override the backend's.
54
55 basis_gates (list[str]):
56 List of basis gate names to unroll to.
57 e.g:
58 ['u1', 'u2', 'u3', 'cx']
59 If None, do not unroll.
60
61 coupling_map (CouplingMap or list):
62 Coupling map (perhaps custom) to target in mapping.
63 Multiple formats are supported:
64 a. CouplingMap instance
65
66 b. list
67 Must be given as an adjacency matrix, where each entry
68 specifies all two-qubit interactions supported by backend
69 e.g:
70 [[0, 1], [0, 3], [1, 2], [1, 5], [2, 5], [4, 1], [5, 3]]
71
72 backend_properties (BackendProperties):
73 Properties returned by a backend, including information on gate
74 errors, readout errors, qubit coherence times, etc. Find a backend
75 that provides this information with:
76 ``backend.properties()``
77
78 initial_layout (Layout or dict or list):
79 Initial position of virtual qubits on physical qubits.
80 If this layout makes the circuit compatible with the coupling_map
81 constraints, it will be used.
82 The final layout is not guaranteed to be the same, as the transpiler
83 may permute qubits through swaps or other means.
84
85 Multiple formats are supported:
86 a. Layout instance
87
88 b. dict
89 virtual to physical:
90 {qr[0]: 0,
91 qr[1]: 3,
92 qr[2]: 5}
93
94 physical to virtual:
95 {0: qr[0],
96 3: qr[1],
97 5: qr[2]}
98
99 c. list
100 virtual to physical:
101 [0, 3, 5] # virtual qubits are ordered (in addition to named)
102
103 physical to virtual:
104 [qr[0], None, None, qr[1], None, qr[2]]
105
106 seed_transpiler (int):
107 Sets random seed for the stochastic parts of the transpiler
108
109 optimization_level (int):
110 How much optimization to perform on the circuits.
111 Higher levels generate more optimized circuits,
112 at the expense of longer transpilation time.
113 0: No optimization
114 1: Light optimization
115 2: Heavy optimization
116 3: Highest optimization
117 If None, level 1 will be chosen as default.
118
119 pass_manager (PassManager):
120 The pass manager to use during transpilation. If this arg is present,
121 auto-selection of pass manager based on the transpile options will be
122 turned off and this pass manager will be used directly.
123
124 qobj_id (str):
125 String identifier to annotate the Qobj
126
127 qobj_header (QobjHeader or dict):
128 User input that will be inserted in Qobj header, and will also be
129 copied to the corresponding Result header. Headers do not affect the run.
130
131 shots (int):
132 Number of repetitions of each circuit, for sampling. Default: 1024
133
134 memory (bool):
135 If True, per-shot measurement bitstrings are returned as well
136 (provided the backend supports it). For OpenPulse jobs, only
137 measurement level 2 supports this option. Default: False
138
139 max_credits (int):
140 Maximum credits to spend on job. Default: 10
141
142 seed_simulator (int):
143 Random seed to control sampling, for when backend is a simulator
144
145 default_qubit_los (list):
146 List of default qubit LO frequencies in Hz
147
148 default_meas_los (list):
149 List of default meas LO frequencies in Hz
150
151 schedule_los (None or list[Union[Dict[PulseChannel, float], LoConfig]] or \
152 Union[Dict[PulseChannel, float], LoConfig]):
153 Experiment LO configurations
154
155 meas_level (int or MeasLevel):
156 Set the appropriate level of the measurement output for pulse experiments.
157
158 meas_return (str or MeasReturn):
159 Level of measurement data for the backend to return
160 For `meas_level` 0 and 1:
161 "single" returns information from every shot.
162 "avg" returns average measurement output (averaged over number of shots).
163
164 memory_slots (int):
165 Number of classical memory slots used in this job.
166
167 memory_slot_size (int):
168 Size of each memory slot if the output is Level 0.
169
170 rep_time (int): repetition time of the experiment in μs.
171 The delay between experiments will be rep_time.
172 Must be from the list provided by the device.
173
174 parameter_binds (list[dict]):
175 List of Parameter bindings over which the set of experiments will be
176 executed. Each list element (bind) should be of the form
177 {Parameter1: value1, Parameter2: value2, ...}. All binds will be
178 executed across all experiments, e.g. if parameter_binds is a
179 length-n list, and there are m experiments, a total of m x n
180 experiments will be run (one for each experiment/bind pair).
181
182 run_config (dict):
183 Extra arguments used to configure the run (e.g. for Aer configurable backends).
184 Refer to the backend documentation for details on these arguments.
185 Note: for now, these keyword arguments will both be copied to the
186 Qobj config, and passed to backend.run()
187
188 Returns:
189 BaseJob: returns job instance derived from BaseJob
190
191 Raises:
192 QiskitError: if the execution cannot be interpreted as either circuits or schedules
193
194 Example:
195 Construct a 5-qubit GHZ circuit and execute 4321 shots on a backend.
196
197 .. jupyter-execute::
198
199 from qiskit import QuantumCircuit, execute, BasicAer
200
201 backend = BasicAer.get_backend('qasm_simulator')
202
203 qc = QuantumCircuit(5, 5)
204 qc.h(0)
205 qc.cx(0, range(1, 5))
206 qc.measure_all()
207
208 job = execute(qc, backend, shots=4321)
209 """
210
211 # transpiling the circuits using given transpile options
212 experiments = transpile(experiments,
213 basis_gates=basis_gates,
214 coupling_map=coupling_map,
215 backend_properties=backend_properties,
216 initial_layout=initial_layout,
217 seed_transpiler=seed_transpiler,
218 optimization_level=optimization_level,
219 backend=backend,
220 pass_manager=pass_manager,
221 )
222
223 # assembling the circuits into a qobj to be run on the backend
224 qobj = assemble(experiments,
225 qobj_id=qobj_id,
226 qobj_header=qobj_header,
227 shots=shots,
228 memory=memory,
229 max_credits=max_credits,
230 seed_simulator=seed_simulator,
231 default_qubit_los=default_qubit_los,
232 default_meas_los=default_meas_los,
233 schedule_los=schedule_los,
234 meas_level=meas_level,
235 meas_return=meas_return,
236 memory_slots=memory_slots,
237 memory_slot_size=memory_slot_size,
238 rep_time=rep_time,
239 parameter_binds=parameter_binds,
240 backend=backend,
241 **run_config
242 )
243
244 # executing the circuits on the backend and returning the job
245 return backend.run(qobj, **run_config)
246
[end of qiskit/execute.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/qiskit/execute.py b/qiskit/execute.py
--- a/qiskit/execute.py
+++ b/qiskit/execute.py
@@ -21,8 +21,10 @@
.. autofunction:: execute
"""
-from qiskit.compiler import transpile, assemble
+from qiskit.compiler import transpile, assemble, schedule
from qiskit.qobj.utils import MeasLevel, MeasReturnType
+from qiskit.pulse import Schedule
+from qiskit.exceptions import QiskitError
def execute(experiments, backend,
@@ -35,6 +37,7 @@
schedule_los=None, meas_level=MeasLevel.CLASSIFIED,
meas_return=MeasReturnType.AVERAGE,
memory_slots=None, memory_slot_size=100, rep_time=None, parameter_binds=None,
+ schedule_circuit=False, inst_map=None, meas_map=None, scheduling_method=None,
**run_config):
"""Execute a list of :class:`qiskit.circuit.QuantumCircuit` or
:class:`qiskit.pulse.Schedule` on a backend.
@@ -179,6 +182,21 @@
length-n list, and there are m experiments, a total of m x n
experiments will be run (one for each experiment/bind pair).
+ schedule_circuit (bool):
+ If ``True``, ``experiments`` will be converted to ``Schedule``s prior to
+ execution.
+
+ inst_map (InstructionScheduleMap):
+ Mapping of circuit operations to pulse schedules. If None, defaults to the
+ ``instruction_schedule_map`` of ``backend``.
+
+ meas_map (list(list(int))):
+ List of sets of qubits that must be measured together. If None, defaults to
+ the ``meas_map`` of ``backend``.
+
+ scheduling_method (str or list(str)):
+ Optionally specify a particular scheduling method.
+
run_config (dict):
Extra arguments used to configure the run (e.g. for Aer configurable backends).
Refer to the backend documentation for details on these arguments.
@@ -220,6 +238,16 @@
pass_manager=pass_manager,
)
+ if schedule_circuit:
+ if isinstance(experiments, Schedule) or isinstance(experiments[0], Schedule):
+ raise QiskitError("Must supply QuantumCircuit to schedule circuit.")
+ experiments = schedule(circuits=experiments,
+ backend=backend,
+ inst_map=inst_map,
+ meas_map=meas_map,
+ method=scheduling_method
+ )
+
# assembling the circuits into a qobj to be run on the backend
qobj = assemble(experiments,
qobj_id=qobj_id,
|
{"golden_diff": "diff --git a/qiskit/execute.py b/qiskit/execute.py\n--- a/qiskit/execute.py\n+++ b/qiskit/execute.py\n@@ -21,8 +21,10 @@\n \n .. autofunction:: execute\n \"\"\"\n-from qiskit.compiler import transpile, assemble\n+from qiskit.compiler import transpile, assemble, schedule\n from qiskit.qobj.utils import MeasLevel, MeasReturnType\n+from qiskit.pulse import Schedule\n+from qiskit.exceptions import QiskitError\n \n \n def execute(experiments, backend,\n@@ -35,6 +37,7 @@\n schedule_los=None, meas_level=MeasLevel.CLASSIFIED,\n meas_return=MeasReturnType.AVERAGE,\n memory_slots=None, memory_slot_size=100, rep_time=None, parameter_binds=None,\n+ schedule_circuit=False, inst_map=None, meas_map=None, scheduling_method=None,\n **run_config):\n \"\"\"Execute a list of :class:`qiskit.circuit.QuantumCircuit` or\n :class:`qiskit.pulse.Schedule` on a backend.\n@@ -179,6 +182,21 @@\n length-n list, and there are m experiments, a total of m x n\n experiments will be run (one for each experiment/bind pair).\n \n+ schedule_circuit (bool):\n+ If ``True``, ``experiments`` will be converted to ``Schedule``s prior to\n+ execution.\n+\n+ inst_map (InstructionScheduleMap):\n+ Mapping of circuit operations to pulse schedules. If None, defaults to the\n+ ``instruction_schedule_map`` of ``backend``.\n+\n+ meas_map (list(list(int))):\n+ List of sets of qubits that must be measured together. If None, defaults to\n+ the ``meas_map`` of ``backend``.\n+\n+ scheduling_method (str or list(str)):\n+ Optionally specify a particular scheduling method.\n+\n run_config (dict):\n Extra arguments used to configure the run (e.g. for Aer configurable backends).\n Refer to the backend documentation for details on these arguments.\n@@ -220,6 +238,16 @@\n pass_manager=pass_manager,\n )\n \n+ if schedule_circuit:\n+ if isinstance(experiments, Schedule) or isinstance(experiments[0], Schedule):\n+ raise QiskitError(\"Must supply QuantumCircuit to schedule circuit.\")\n+ experiments = schedule(circuits=experiments,\n+ backend=backend,\n+ inst_map=inst_map,\n+ meas_map=meas_map,\n+ method=scheduling_method\n+ )\n+\n # assembling the circuits into a qobj to be run on the backend\n qobj = assemble(experiments,\n qobj_id=qobj_id,\n", "issue": "Scheduler should be supported in execute pipeline\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected enhancement?\r\nIt should be configurable to schedule your circuit into pulses during execute.\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\n=============================================\nExecuting Experiments (:mod:`qiskit.execute`)\n=============================================\n\n.. currentmodule:: qiskit.execute\n\n.. 
autofunction:: execute\n\"\"\"\nfrom qiskit.compiler import transpile, assemble\nfrom qiskit.qobj.utils import MeasLevel, MeasReturnType\n\n\ndef execute(experiments, backend,\n basis_gates=None, coupling_map=None, # circuit transpile options\n backend_properties=None, initial_layout=None,\n seed_transpiler=None, optimization_level=None, pass_manager=None,\n qobj_id=None, qobj_header=None, shots=1024, # common run options\n memory=False, max_credits=10, seed_simulator=None,\n default_qubit_los=None, default_meas_los=None, # schedule run options\n schedule_los=None, meas_level=MeasLevel.CLASSIFIED,\n meas_return=MeasReturnType.AVERAGE,\n memory_slots=None, memory_slot_size=100, rep_time=None, parameter_binds=None,\n **run_config):\n \"\"\"Execute a list of :class:`qiskit.circuit.QuantumCircuit` or\n :class:`qiskit.pulse.Schedule` on a backend.\n\n The execution is asynchronous, and a handle to a job instance is returned.\n\n Args:\n experiments (QuantumCircuit or list[QuantumCircuit] or Schedule or list[Schedule]):\n Circuit(s) or pulse schedule(s) to execute\n\n backend (BaseBackend):\n Backend to execute circuits on.\n Transpiler options are automatically grabbed from\n backend.configuration() and backend.properties().\n If any other option is explicitly set (e.g. coupling_map), it\n will override the backend's.\n\n basis_gates (list[str]):\n List of basis gate names to unroll to.\n e.g:\n ['u1', 'u2', 'u3', 'cx']\n If None, do not unroll.\n\n coupling_map (CouplingMap or list):\n Coupling map (perhaps custom) to target in mapping.\n Multiple formats are supported:\n a. CouplingMap instance\n\n b. list\n Must be given as an adjacency matrix, where each entry\n specifies all two-qubit interactions supported by backend\n e.g:\n [[0, 1], [0, 3], [1, 2], [1, 5], [2, 5], [4, 1], [5, 3]]\n\n backend_properties (BackendProperties):\n Properties returned by a backend, including information on gate\n errors, readout errors, qubit coherence times, etc. Find a backend\n that provides this information with:\n ``backend.properties()``\n\n initial_layout (Layout or dict or list):\n Initial position of virtual qubits on physical qubits.\n If this layout makes the circuit compatible with the coupling_map\n constraints, it will be used.\n The final layout is not guaranteed to be the same, as the transpiler\n may permute qubits through swaps or other means.\n\n Multiple formats are supported:\n a. Layout instance\n\n b. dict\n virtual to physical:\n {qr[0]: 0,\n qr[1]: 3,\n qr[2]: 5}\n\n physical to virtual:\n {0: qr[0],\n 3: qr[1],\n 5: qr[2]}\n\n c. list\n virtual to physical:\n [0, 3, 5] # virtual qubits are ordered (in addition to named)\n\n physical to virtual:\n [qr[0], None, None, qr[1], None, qr[2]]\n\n seed_transpiler (int):\n Sets random seed for the stochastic parts of the transpiler\n\n optimization_level (int):\n How much optimization to perform on the circuits.\n Higher levels generate more optimized circuits,\n at the expense of longer transpilation time.\n 0: No optimization\n 1: Light optimization\n 2: Heavy optimization\n 3: Highest optimization\n If None, level 1 will be chosen as default.\n\n pass_manager (PassManager):\n The pass manager to use during transpilation. 
If this arg is present,\n auto-selection of pass manager based on the transpile options will be\n turned off and this pass manager will be used directly.\n\n qobj_id (str):\n String identifier to annotate the Qobj\n\n qobj_header (QobjHeader or dict):\n User input that will be inserted in Qobj header, and will also be\n copied to the corresponding Result header. Headers do not affect the run.\n\n shots (int):\n Number of repetitions of each circuit, for sampling. Default: 1024\n\n memory (bool):\n If True, per-shot measurement bitstrings are returned as well\n (provided the backend supports it). For OpenPulse jobs, only\n measurement level 2 supports this option. Default: False\n\n max_credits (int):\n Maximum credits to spend on job. Default: 10\n\n seed_simulator (int):\n Random seed to control sampling, for when backend is a simulator\n\n default_qubit_los (list):\n List of default qubit LO frequencies in Hz\n\n default_meas_los (list):\n List of default meas LO frequencies in Hz\n\n schedule_los (None or list[Union[Dict[PulseChannel, float], LoConfig]] or \\\n Union[Dict[PulseChannel, float], LoConfig]):\n Experiment LO configurations\n\n meas_level (int or MeasLevel):\n Set the appropriate level of the measurement output for pulse experiments.\n\n meas_return (str or MeasReturn):\n Level of measurement data for the backend to return\n For `meas_level` 0 and 1:\n \"single\" returns information from every shot.\n \"avg\" returns average measurement output (averaged over number of shots).\n\n memory_slots (int):\n Number of classical memory slots used in this job.\n\n memory_slot_size (int):\n Size of each memory slot if the output is Level 0.\n\n rep_time (int): repetition time of the experiment in \u03bcs.\n The delay between experiments will be rep_time.\n Must be from the list provided by the device.\n\n parameter_binds (list[dict]):\n List of Parameter bindings over which the set of experiments will be\n executed. Each list element (bind) should be of the form\n {Parameter1: value1, Parameter2: value2, ...}. All binds will be\n executed across all experiments, e.g. if parameter_binds is a\n length-n list, and there are m experiments, a total of m x n\n experiments will be run (one for each experiment/bind pair).\n\n run_config (dict):\n Extra arguments used to configure the run (e.g. for Aer configurable backends).\n Refer to the backend documentation for details on these arguments.\n Note: for now, these keyword arguments will both be copied to the\n Qobj config, and passed to backend.run()\n\n Returns:\n BaseJob: returns job instance derived from BaseJob\n\n Raises:\n QiskitError: if the execution cannot be interpreted as either circuits or schedules\n\n Example:\n Construct a 5-qubit GHZ circuit and execute 4321 shots on a backend.\n\n .. 
jupyter-execute::\n\n from qiskit import QuantumCircuit, execute, BasicAer\n\n backend = BasicAer.get_backend('qasm_simulator')\n\n qc = QuantumCircuit(5, 5)\n qc.h(0)\n qc.cx(0, range(1, 5))\n qc.measure_all()\n\n job = execute(qc, backend, shots=4321)\n \"\"\"\n\n # transpiling the circuits using given transpile options\n experiments = transpile(experiments,\n basis_gates=basis_gates,\n coupling_map=coupling_map,\n backend_properties=backend_properties,\n initial_layout=initial_layout,\n seed_transpiler=seed_transpiler,\n optimization_level=optimization_level,\n backend=backend,\n pass_manager=pass_manager,\n )\n\n # assembling the circuits into a qobj to be run on the backend\n qobj = assemble(experiments,\n qobj_id=qobj_id,\n qobj_header=qobj_header,\n shots=shots,\n memory=memory,\n max_credits=max_credits,\n seed_simulator=seed_simulator,\n default_qubit_los=default_qubit_los,\n default_meas_los=default_meas_los,\n schedule_los=schedule_los,\n meas_level=meas_level,\n meas_return=meas_return,\n memory_slots=memory_slots,\n memory_slot_size=memory_slot_size,\n rep_time=rep_time,\n parameter_binds=parameter_binds,\n backend=backend,\n **run_config\n )\n\n # executing the circuits on the backend and returning the job\n return backend.run(qobj, **run_config)\n", "path": "qiskit/execute.py"}]}
| 3,341 | 612 |
gh_patches_debug_13726
|
rasdani/github-patches
|
git_diff
|
ycm-core__ycmd-551
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ycmd should return valid JSON instead of empty HTML for 2 requests
/load_extra_conf_file and /ignore_extra_conf_file requests currently return an empty body
in case of success, which is not valid JSON. Instead, ycmd should return a valid JSON body, for example just "true".
</issue>
<code>
[start of ycmd/handlers.py]
1 # Copyright (C) 2013 Google Inc.
2 #
3 # This file is part of ycmd.
4 #
5 # ycmd is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # ycmd is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with ycmd. If not, see <http://www.gnu.org/licenses/>.
17
18 from __future__ import absolute_import
19 from __future__ import unicode_literals
20 from __future__ import print_function
21 from __future__ import division
22 from future import standard_library
23 standard_library.install_aliases()
24 from builtins import * # noqa
25
26 import atexit
27 import bottle
28 import json
29 import logging
30 import traceback
31 from bottle import request
32
33 import ycm_core
34 from ycmd import extra_conf_store, hmac_plugin, server_state, user_options_store
35 from ycmd.responses import BuildExceptionResponse, BuildCompletionResponse
36 from ycmd.request_wrap import RequestWrap
37 from ycmd.bottle_utils import SetResponseHeader
38 from ycmd.completers.completer_utils import FilterAndSortCandidatesWrap
39
40
41 # num bytes for the request body buffer; request.json only works if the request
42 # size is less than this
43 bottle.Request.MEMFILE_MAX = 1000 * 1024
44
45 _server_state = None
46 _hmac_secret = bytes()
47 _logger = logging.getLogger( __name__ )
48 app = bottle.Bottle()
49
50
51 @app.post( '/event_notification' )
52 def EventNotification():
53 _logger.info( 'Received event notification' )
54 request_data = RequestWrap( request.json )
55 event_name = request_data[ 'event_name' ]
56 _logger.debug( 'Event name: %s', event_name )
57
58 event_handler = 'On' + event_name
59 getattr( _server_state.GetGeneralCompleter(), event_handler )( request_data )
60
61 filetypes = request_data[ 'filetypes' ]
62 response_data = None
63 if _server_state.FiletypeCompletionUsable( filetypes ):
64 response_data = getattr( _server_state.GetFiletypeCompleter( filetypes ),
65 event_handler )( request_data )
66
67 if response_data:
68 return _JsonResponse( response_data )
69 return _JsonResponse( {} )
70
71
72 @app.post( '/run_completer_command' )
73 def RunCompleterCommand():
74 _logger.info( 'Received command request' )
75 request_data = RequestWrap( request.json )
76 completer = _GetCompleterForRequestData( request_data )
77
78 return _JsonResponse( completer.OnUserCommand(
79 request_data[ 'command_arguments' ],
80 request_data ) )
81
82
83 @app.post( '/completions' )
84 def GetCompletions():
85 _logger.info( 'Received completion request' )
86 request_data = RequestWrap( request.json )
87 ( do_filetype_completion, forced_filetype_completion ) = (
88 _server_state.ShouldUseFiletypeCompleter( request_data ) )
89 _logger.debug( 'Using filetype completion: %s', do_filetype_completion )
90
91 errors = None
92 completions = None
93
94 if do_filetype_completion:
95 try:
96 completions = ( _server_state.GetFiletypeCompleter(
97 request_data[ 'filetypes' ] )
98 .ComputeCandidates( request_data ) )
99
100 except Exception as exception:
101 if forced_filetype_completion:
102 # user explicitly asked for semantic completion, so just pass the error
103 # back
104 raise
105 else:
106 # store the error to be returned with results from the identifier
107 # completer
108 stack = traceback.format_exc()
109 _logger.error( 'Exception from semantic completer (using general): ' +
110 "".join( stack ) )
111 errors = [ BuildExceptionResponse( exception, stack ) ]
112
113 if not completions and not forced_filetype_completion:
114 completions = ( _server_state.GetGeneralCompleter()
115 .ComputeCandidates( request_data ) )
116
117 return _JsonResponse(
118 BuildCompletionResponse( completions if completions else [],
119 request_data.CompletionStartColumn(),
120 errors = errors ) )
121
122
123 @app.post( '/filter_and_sort_candidates' )
124 def FilterAndSortCandidates():
125 _logger.info( 'Received filter & sort request' )
126 # Not using RequestWrap because no need and the requests coming in aren't like
127 # the usual requests we handle.
128 request_data = request.json
129
130 return _JsonResponse( FilterAndSortCandidatesWrap(
131 request_data[ 'candidates'],
132 request_data[ 'sort_property' ],
133 request_data[ 'query' ] ) )
134
135
136 @app.get( '/healthy' )
137 def GetHealthy():
138 _logger.info( 'Received health request' )
139 if request.query.include_subservers:
140 cs_completer = _server_state.GetFiletypeCompleter( ['cs'] )
141 return _JsonResponse( cs_completer.ServerIsHealthy() )
142 return _JsonResponse( True )
143
144
145 @app.get( '/ready' )
146 def GetReady():
147 _logger.info( 'Received ready request' )
148 if request.query.subserver:
149 filetype = request.query.subserver
150 return _JsonResponse( _IsSubserverReady( filetype ) )
151 if request.query.include_subservers:
152 return _JsonResponse( _IsSubserverReady( 'cs' ) )
153 return _JsonResponse( True )
154
155
156 def _IsSubserverReady( filetype ):
157 completer = _server_state.GetFiletypeCompleter( [filetype] )
158 return completer.ServerIsReady()
159
160
161 @app.post( '/semantic_completion_available' )
162 def FiletypeCompletionAvailable():
163 _logger.info( 'Received filetype completion available request' )
164 return _JsonResponse( _server_state.FiletypeCompletionAvailable(
165 RequestWrap( request.json )[ 'filetypes' ] ) )
166
167
168 @app.post( '/defined_subcommands' )
169 def DefinedSubcommands():
170 _logger.info( 'Received defined subcommands request' )
171 completer = _GetCompleterForRequestData( RequestWrap( request.json ) )
172
173 return _JsonResponse( completer.DefinedSubcommands() )
174
175
176 @app.post( '/detailed_diagnostic' )
177 def GetDetailedDiagnostic():
178 _logger.info( 'Received detailed diagnostic request' )
179 request_data = RequestWrap( request.json )
180 completer = _GetCompleterForRequestData( request_data )
181
182 return _JsonResponse( completer.GetDetailedDiagnostic( request_data ) )
183
184
185 @app.post( '/load_extra_conf_file' )
186 def LoadExtraConfFile():
187 _logger.info( 'Received extra conf load request' )
188 request_data = RequestWrap( request.json, validate = False )
189 extra_conf_store.Load( request_data[ 'filepath' ], force = True )
190
191
192 @app.post( '/ignore_extra_conf_file' )
193 def IgnoreExtraConfFile():
194 _logger.info( 'Received extra conf ignore request' )
195 request_data = RequestWrap( request.json, validate = False )
196 extra_conf_store.Disable( request_data[ 'filepath' ] )
197
198
199 @app.post( '/debug_info' )
200 def DebugInfo():
201 _logger.info( 'Received debug info request' )
202
203 output = []
204 has_clang_support = ycm_core.HasClangSupport()
205 output.append( 'Server has Clang support compiled in: {0}'.format(
206 has_clang_support ) )
207
208 if has_clang_support:
209 output.append( 'Clang version: ' + ycm_core.ClangVersion() )
210
211 request_data = RequestWrap( request.json )
212 try:
213 output.append(
214 _GetCompleterForRequestData( request_data ).DebugInfo( request_data ) )
215 except Exception:
216 _logger.debug( 'Exception in debug info request: '
217 + traceback.format_exc() )
218
219 return _JsonResponse( '\n'.join( output ) )
220
221
222 # The type of the param is Bottle.HTTPError
223 def ErrorHandler( httperror ):
224 body = _JsonResponse( BuildExceptionResponse( httperror.exception,
225 httperror.traceback ) )
226 hmac_plugin.SetHmacHeader( body, _hmac_secret )
227 return body
228
229 # For every error Bottle encounters it will use this as the default handler
230 app.default_error_handler = ErrorHandler
231
232
233 def _JsonResponse( data ):
234 SetResponseHeader( 'Content-Type', 'application/json' )
235 return json.dumps( data, default = _UniversalSerialize )
236
237
238 def _UniversalSerialize( obj ):
239 try:
240 serialized = obj.__dict__.copy()
241 serialized[ 'TYPE' ] = type( obj ).__name__
242 return serialized
243 except AttributeError:
244 return str( obj )
245
246
247 def _GetCompleterForRequestData( request_data ):
248 completer_target = request_data.get( 'completer_target', None )
249
250 if completer_target == 'identifier':
251 return _server_state.GetGeneralCompleter().GetIdentifierCompleter()
252 elif completer_target == 'filetype_default' or not completer_target:
253 return _server_state.GetFiletypeCompleter( request_data[ 'filetypes' ] )
254 else:
255 return _server_state.GetFiletypeCompleter( [ completer_target ] )
256
257
258 @atexit.register
259 def ServerShutdown():
260 _logger.info( 'Server shutting down' )
261 if _server_state:
262 _server_state.Shutdown()
263 extra_conf_store.Shutdown()
264
265
266 def SetHmacSecret( hmac_secret ):
267 global _hmac_secret
268 _hmac_secret = hmac_secret
269
270
271 def UpdateUserOptions( options ):
272 global _server_state
273
274 if not options:
275 return
276
277 # This should never be passed in, but let's try to remove it just in case.
278 options.pop( 'hmac_secret', None )
279 user_options_store.SetAll( options )
280 _server_state = server_state.ServerState( options )
281
282
283 def SetServerStateToDefaults():
284 global _server_state, _logger
285 _logger = logging.getLogger( __name__ )
286 user_options_store.LoadDefaults()
287 _server_state = server_state.ServerState( user_options_store.GetAll() )
288 extra_conf_store.Reset()
289
[end of ycmd/handlers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ycmd/handlers.py b/ycmd/handlers.py
--- a/ycmd/handlers.py
+++ b/ycmd/handlers.py
@@ -188,6 +188,8 @@
request_data = RequestWrap( request.json, validate = False )
extra_conf_store.Load( request_data[ 'filepath' ], force = True )
+ return _JsonResponse( True )
+
@app.post( '/ignore_extra_conf_file' )
def IgnoreExtraConfFile():
@@ -195,6 +197,8 @@
request_data = RequestWrap( request.json, validate = False )
extra_conf_store.Disable( request_data[ 'filepath' ] )
+ return _JsonResponse( True )
+
@app.post( '/debug_info' )
def DebugInfo():
|
{"golden_diff": "diff --git a/ycmd/handlers.py b/ycmd/handlers.py\n--- a/ycmd/handlers.py\n+++ b/ycmd/handlers.py\n@@ -188,6 +188,8 @@\n request_data = RequestWrap( request.json, validate = False )\n extra_conf_store.Load( request_data[ 'filepath' ], force = True )\n \n+ return _JsonResponse( True )\n+\n \n @app.post( '/ignore_extra_conf_file' )\n def IgnoreExtraConfFile():\n@@ -195,6 +197,8 @@\n request_data = RequestWrap( request.json, validate = False )\n extra_conf_store.Disable( request_data[ 'filepath' ] )\n \n+ return _JsonResponse( True )\n+\n \n @app.post( '/debug_info' )\n def DebugInfo():\n", "issue": "ycmd should return valid JSON instead of empty HTML for 2 requests\n/load_extra_conf_file and /ignore_extra_conf_file requests currently return an empty body\nin case of success, which is not valid JSON. Instead, ycmd should return valid JSON body, for example just \"true\".\n\n", "before_files": [{"content": "# Copyright (C) 2013 Google Inc.\n#\n# This file is part of ycmd.\n#\n# ycmd is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# ycmd is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with ycmd. If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import * # noqa\n\nimport atexit\nimport bottle\nimport json\nimport logging\nimport traceback\nfrom bottle import request\n\nimport ycm_core\nfrom ycmd import extra_conf_store, hmac_plugin, server_state, user_options_store\nfrom ycmd.responses import BuildExceptionResponse, BuildCompletionResponse\nfrom ycmd.request_wrap import RequestWrap\nfrom ycmd.bottle_utils import SetResponseHeader\nfrom ycmd.completers.completer_utils import FilterAndSortCandidatesWrap\n\n\n# num bytes for the request body buffer; request.json only works if the request\n# size is less than this\nbottle.Request.MEMFILE_MAX = 1000 * 1024\n\n_server_state = None\n_hmac_secret = bytes()\n_logger = logging.getLogger( __name__ )\napp = bottle.Bottle()\n\n\[email protected]( '/event_notification' )\ndef EventNotification():\n _logger.info( 'Received event notification' )\n request_data = RequestWrap( request.json )\n event_name = request_data[ 'event_name' ]\n _logger.debug( 'Event name: %s', event_name )\n\n event_handler = 'On' + event_name\n getattr( _server_state.GetGeneralCompleter(), event_handler )( request_data )\n\n filetypes = request_data[ 'filetypes' ]\n response_data = None\n if _server_state.FiletypeCompletionUsable( filetypes ):\n response_data = getattr( _server_state.GetFiletypeCompleter( filetypes ),\n event_handler )( request_data )\n\n if response_data:\n return _JsonResponse( response_data )\n return _JsonResponse( {} )\n\n\[email protected]( '/run_completer_command' )\ndef RunCompleterCommand():\n _logger.info( 'Received command request' )\n request_data = RequestWrap( request.json )\n completer = _GetCompleterForRequestData( request_data )\n\n return _JsonResponse( 
completer.OnUserCommand(\n request_data[ 'command_arguments' ],\n request_data ) )\n\n\[email protected]( '/completions' )\ndef GetCompletions():\n _logger.info( 'Received completion request' )\n request_data = RequestWrap( request.json )\n ( do_filetype_completion, forced_filetype_completion ) = (\n _server_state.ShouldUseFiletypeCompleter( request_data ) )\n _logger.debug( 'Using filetype completion: %s', do_filetype_completion )\n\n errors = None\n completions = None\n\n if do_filetype_completion:\n try:\n completions = ( _server_state.GetFiletypeCompleter(\n request_data[ 'filetypes' ] )\n .ComputeCandidates( request_data ) )\n\n except Exception as exception:\n if forced_filetype_completion:\n # user explicitly asked for semantic completion, so just pass the error\n # back\n raise\n else:\n # store the error to be returned with results from the identifier\n # completer\n stack = traceback.format_exc()\n _logger.error( 'Exception from semantic completer (using general): ' +\n \"\".join( stack ) )\n errors = [ BuildExceptionResponse( exception, stack ) ]\n\n if not completions and not forced_filetype_completion:\n completions = ( _server_state.GetGeneralCompleter()\n .ComputeCandidates( request_data ) )\n\n return _JsonResponse(\n BuildCompletionResponse( completions if completions else [],\n request_data.CompletionStartColumn(),\n errors = errors ) )\n\n\[email protected]( '/filter_and_sort_candidates' )\ndef FilterAndSortCandidates():\n _logger.info( 'Received filter & sort request' )\n # Not using RequestWrap because no need and the requests coming in aren't like\n # the usual requests we handle.\n request_data = request.json\n\n return _JsonResponse( FilterAndSortCandidatesWrap(\n request_data[ 'candidates'],\n request_data[ 'sort_property' ],\n request_data[ 'query' ] ) )\n\n\[email protected]( '/healthy' )\ndef GetHealthy():\n _logger.info( 'Received health request' )\n if request.query.include_subservers:\n cs_completer = _server_state.GetFiletypeCompleter( ['cs'] )\n return _JsonResponse( cs_completer.ServerIsHealthy() )\n return _JsonResponse( True )\n\n\[email protected]( '/ready' )\ndef GetReady():\n _logger.info( 'Received ready request' )\n if request.query.subserver:\n filetype = request.query.subserver\n return _JsonResponse( _IsSubserverReady( filetype ) )\n if request.query.include_subservers:\n return _JsonResponse( _IsSubserverReady( 'cs' ) )\n return _JsonResponse( True )\n\n\ndef _IsSubserverReady( filetype ):\n completer = _server_state.GetFiletypeCompleter( [filetype] )\n return completer.ServerIsReady()\n\n\[email protected]( '/semantic_completion_available' )\ndef FiletypeCompletionAvailable():\n _logger.info( 'Received filetype completion available request' )\n return _JsonResponse( _server_state.FiletypeCompletionAvailable(\n RequestWrap( request.json )[ 'filetypes' ] ) )\n\n\[email protected]( '/defined_subcommands' )\ndef DefinedSubcommands():\n _logger.info( 'Received defined subcommands request' )\n completer = _GetCompleterForRequestData( RequestWrap( request.json ) )\n\n return _JsonResponse( completer.DefinedSubcommands() )\n\n\[email protected]( '/detailed_diagnostic' )\ndef GetDetailedDiagnostic():\n _logger.info( 'Received detailed diagnostic request' )\n request_data = RequestWrap( request.json )\n completer = _GetCompleterForRequestData( request_data )\n\n return _JsonResponse( completer.GetDetailedDiagnostic( request_data ) )\n\n\[email protected]( '/load_extra_conf_file' )\ndef LoadExtraConfFile():\n _logger.info( 'Received extra conf load 
request' )\n request_data = RequestWrap( request.json, validate = False )\n extra_conf_store.Load( request_data[ 'filepath' ], force = True )\n\n\[email protected]( '/ignore_extra_conf_file' )\ndef IgnoreExtraConfFile():\n _logger.info( 'Received extra conf ignore request' )\n request_data = RequestWrap( request.json, validate = False )\n extra_conf_store.Disable( request_data[ 'filepath' ] )\n\n\[email protected]( '/debug_info' )\ndef DebugInfo():\n _logger.info( 'Received debug info request' )\n\n output = []\n has_clang_support = ycm_core.HasClangSupport()\n output.append( 'Server has Clang support compiled in: {0}'.format(\n has_clang_support ) )\n\n if has_clang_support:\n output.append( 'Clang version: ' + ycm_core.ClangVersion() )\n\n request_data = RequestWrap( request.json )\n try:\n output.append(\n _GetCompleterForRequestData( request_data ).DebugInfo( request_data ) )\n except Exception:\n _logger.debug( 'Exception in debug info request: '\n + traceback.format_exc() )\n\n return _JsonResponse( '\\n'.join( output ) )\n\n\n# The type of the param is Bottle.HTTPError\ndef ErrorHandler( httperror ):\n body = _JsonResponse( BuildExceptionResponse( httperror.exception,\n httperror.traceback ) )\n hmac_plugin.SetHmacHeader( body, _hmac_secret )\n return body\n\n# For every error Bottle encounters it will use this as the default handler\napp.default_error_handler = ErrorHandler\n\n\ndef _JsonResponse( data ):\n SetResponseHeader( 'Content-Type', 'application/json' )\n return json.dumps( data, default = _UniversalSerialize )\n\n\ndef _UniversalSerialize( obj ):\n try:\n serialized = obj.__dict__.copy()\n serialized[ 'TYPE' ] = type( obj ).__name__\n return serialized\n except AttributeError:\n return str( obj )\n\n\ndef _GetCompleterForRequestData( request_data ):\n completer_target = request_data.get( 'completer_target', None )\n\n if completer_target == 'identifier':\n return _server_state.GetGeneralCompleter().GetIdentifierCompleter()\n elif completer_target == 'filetype_default' or not completer_target:\n return _server_state.GetFiletypeCompleter( request_data[ 'filetypes' ] )\n else:\n return _server_state.GetFiletypeCompleter( [ completer_target ] )\n\n\[email protected]\ndef ServerShutdown():\n _logger.info( 'Server shutting down' )\n if _server_state:\n _server_state.Shutdown()\n extra_conf_store.Shutdown()\n\n\ndef SetHmacSecret( hmac_secret ):\n global _hmac_secret\n _hmac_secret = hmac_secret\n\n\ndef UpdateUserOptions( options ):\n global _server_state\n\n if not options:\n return\n\n # This should never be passed in, but let's try to remove it just in case.\n options.pop( 'hmac_secret', None )\n user_options_store.SetAll( options )\n _server_state = server_state.ServerState( options )\n\n\ndef SetServerStateToDefaults():\n global _server_state, _logger\n _logger = logging.getLogger( __name__ )\n user_options_store.LoadDefaults()\n _server_state = server_state.ServerState( user_options_store.GetAll() )\n extra_conf_store.Reset()\n", "path": "ycmd/handlers.py"}]}
| 3,594 | 182 |
gh_patches_debug_5100
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-1748
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
httpx tests failing on httpx==0.23.1
Can we leave https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1459 open until we actually fix the instrumentation to work with that version?
_Originally posted by @aabmass in https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1460#pullrequestreview-1186403709_
</issue>
<code>
[start of opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # DO NOT EDIT. THIS FILE WAS AUTOGENERATED FROM INSTRUMENTATION PACKAGES.
16 # RUN `python scripts/generate_instrumentation_bootstrap.py` TO REGENERATE.
17
18 libraries = {
19 "aio_pika": {
20 "library": "aio_pika >= 7.2.0, < 10.0.0",
21 "instrumentation": "opentelemetry-instrumentation-aio-pika==0.40b0.dev",
22 },
23 "aiohttp": {
24 "library": "aiohttp ~= 3.0",
25 "instrumentation": "opentelemetry-instrumentation-aiohttp-client==0.40b0.dev",
26 },
27 "aiopg": {
28 "library": "aiopg >= 0.13.0, < 2.0.0",
29 "instrumentation": "opentelemetry-instrumentation-aiopg==0.40b0.dev",
30 },
31 "asgiref": {
32 "library": "asgiref ~= 3.0",
33 "instrumentation": "opentelemetry-instrumentation-asgi==0.40b0.dev",
34 },
35 "asyncpg": {
36 "library": "asyncpg >= 0.12.0",
37 "instrumentation": "opentelemetry-instrumentation-asyncpg==0.40b0.dev",
38 },
39 "boto": {
40 "library": "boto~=2.0",
41 "instrumentation": "opentelemetry-instrumentation-boto==0.40b0.dev",
42 },
43 "boto3": {
44 "library": "boto3 ~= 1.0",
45 "instrumentation": "opentelemetry-instrumentation-boto3sqs==0.40b0.dev",
46 },
47 "botocore": {
48 "library": "botocore ~= 1.0",
49 "instrumentation": "opentelemetry-instrumentation-botocore==0.40b0.dev",
50 },
51 "celery": {
52 "library": "celery >= 4.0, < 6.0",
53 "instrumentation": "opentelemetry-instrumentation-celery==0.40b0.dev",
54 },
55 "confluent-kafka": {
56 "library": "confluent-kafka >= 1.8.2, < 2.0.0",
57 "instrumentation": "opentelemetry-instrumentation-confluent-kafka==0.40b0.dev",
58 },
59 "django": {
60 "library": "django >= 1.10",
61 "instrumentation": "opentelemetry-instrumentation-django==0.40b0.dev",
62 },
63 "elasticsearch": {
64 "library": "elasticsearch >= 2.0",
65 "instrumentation": "opentelemetry-instrumentation-elasticsearch==0.40b0.dev",
66 },
67 "falcon": {
68 "library": "falcon >= 1.4.1, < 4.0.0",
69 "instrumentation": "opentelemetry-instrumentation-falcon==0.40b0.dev",
70 },
71 "fastapi": {
72 "library": "fastapi ~= 0.58",
73 "instrumentation": "opentelemetry-instrumentation-fastapi==0.40b0.dev",
74 },
75 "flask": {
76 "library": "flask >= 1.0, < 3.0",
77 "instrumentation": "opentelemetry-instrumentation-flask==0.40b0.dev",
78 },
79 "grpcio": {
80 "library": "grpcio ~= 1.27",
81 "instrumentation": "opentelemetry-instrumentation-grpc==0.40b0.dev",
82 },
83 "httpx": {
84 "library": "httpx >= 0.18.0, <= 0.23.0",
85 "instrumentation": "opentelemetry-instrumentation-httpx==0.40b0.dev",
86 },
87 "jinja2": {
88 "library": "jinja2 >= 2.7, < 4.0",
89 "instrumentation": "opentelemetry-instrumentation-jinja2==0.40b0.dev",
90 },
91 "kafka-python": {
92 "library": "kafka-python >= 2.0",
93 "instrumentation": "opentelemetry-instrumentation-kafka-python==0.40b0.dev",
94 },
95 "mysql-connector-python": {
96 "library": "mysql-connector-python ~= 8.0",
97 "instrumentation": "opentelemetry-instrumentation-mysql==0.40b0.dev",
98 },
99 "pika": {
100 "library": "pika >= 0.12.0",
101 "instrumentation": "opentelemetry-instrumentation-pika==0.40b0.dev",
102 },
103 "psycopg2": {
104 "library": "psycopg2 >= 2.7.3.1",
105 "instrumentation": "opentelemetry-instrumentation-psycopg2==0.40b0.dev",
106 },
107 "pymemcache": {
108 "library": "pymemcache >= 1.3.5, < 5",
109 "instrumentation": "opentelemetry-instrumentation-pymemcache==0.40b0.dev",
110 },
111 "pymongo": {
112 "library": "pymongo >= 3.1, < 5.0",
113 "instrumentation": "opentelemetry-instrumentation-pymongo==0.40b0.dev",
114 },
115 "PyMySQL": {
116 "library": "PyMySQL < 2",
117 "instrumentation": "opentelemetry-instrumentation-pymysql==0.40b0.dev",
118 },
119 "pyramid": {
120 "library": "pyramid >= 1.7",
121 "instrumentation": "opentelemetry-instrumentation-pyramid==0.40b0.dev",
122 },
123 "redis": {
124 "library": "redis >= 2.6",
125 "instrumentation": "opentelemetry-instrumentation-redis==0.40b0.dev",
126 },
127 "remoulade": {
128 "library": "remoulade >= 0.50",
129 "instrumentation": "opentelemetry-instrumentation-remoulade==0.40b0.dev",
130 },
131 "requests": {
132 "library": "requests ~= 2.0",
133 "instrumentation": "opentelemetry-instrumentation-requests==0.40b0.dev",
134 },
135 "scikit-learn": {
136 "library": "scikit-learn ~= 0.24.0",
137 "instrumentation": "opentelemetry-instrumentation-sklearn==0.40b0.dev",
138 },
139 "sqlalchemy": {
140 "library": "sqlalchemy",
141 "instrumentation": "opentelemetry-instrumentation-sqlalchemy==0.40b0.dev",
142 },
143 "starlette": {
144 "library": "starlette ~= 0.13.0",
145 "instrumentation": "opentelemetry-instrumentation-starlette==0.40b0.dev",
146 },
147 "psutil": {
148 "library": "psutil >= 5",
149 "instrumentation": "opentelemetry-instrumentation-system-metrics==0.40b0.dev",
150 },
151 "tornado": {
152 "library": "tornado >= 5.1.1",
153 "instrumentation": "opentelemetry-instrumentation-tornado==0.40b0.dev",
154 },
155 "tortoise-orm": {
156 "library": "tortoise-orm >= 0.17.0",
157 "instrumentation": "opentelemetry-instrumentation-tortoiseorm==0.40b0.dev",
158 },
159 "pydantic": {
160 "library": "pydantic >= 1.10.2",
161 "instrumentation": "opentelemetry-instrumentation-tortoiseorm==0.40b0.dev",
162 },
163 "urllib3": {
164 "library": "urllib3 >= 1.0.0, < 2.0.0",
165 "instrumentation": "opentelemetry-instrumentation-urllib3==0.40b0.dev",
166 },
167 }
168 default_instrumentations = [
169 "opentelemetry-instrumentation-aws-lambda==0.40b0.dev",
170 "opentelemetry-instrumentation-dbapi==0.40b0.dev",
171 "opentelemetry-instrumentation-logging==0.40b0.dev",
172 "opentelemetry-instrumentation-sqlite3==0.40b0.dev",
173 "opentelemetry-instrumentation-urllib==0.40b0.dev",
174 "opentelemetry-instrumentation-wsgi==0.40b0.dev",
175 ]
176
[end of opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py
--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py
+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py
@@ -81,7 +81,7 @@
"instrumentation": "opentelemetry-instrumentation-grpc==0.40b0.dev",
},
"httpx": {
- "library": "httpx >= 0.18.0, <= 0.23.0",
+ "library": "httpx >= 0.18.0",
"instrumentation": "opentelemetry-instrumentation-httpx==0.40b0.dev",
},
"jinja2": {
|
{"golden_diff": "diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py\n--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py\n+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py\n@@ -81,7 +81,7 @@\n \"instrumentation\": \"opentelemetry-instrumentation-grpc==0.40b0.dev\",\n },\n \"httpx\": {\n- \"library\": \"httpx >= 0.18.0, <= 0.23.0\",\n+ \"library\": \"httpx >= 0.18.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-httpx==0.40b0.dev\",\n },\n \"jinja2\": {\n", "issue": "httpx tests failing on httpx==0.23.1\n Can we leave https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1459 open until we actually fix the instrumentation to work with that version?\r\n\r\n_Originally posted by @aabmass in https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1460#pullrequestreview-1186403709_\r\n \n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# DO NOT EDIT. THIS FILE WAS AUTOGENERATED FROM INSTRUMENTATION PACKAGES.\n# RUN `python scripts/generate_instrumentation_bootstrap.py` TO REGENERATE.\n\nlibraries = {\n \"aio_pika\": {\n \"library\": \"aio_pika >= 7.2.0, < 10.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aio-pika==0.40b0.dev\",\n },\n \"aiohttp\": {\n \"library\": \"aiohttp ~= 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aiohttp-client==0.40b0.dev\",\n },\n \"aiopg\": {\n \"library\": \"aiopg >= 0.13.0, < 2.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aiopg==0.40b0.dev\",\n },\n \"asgiref\": {\n \"library\": \"asgiref ~= 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-asgi==0.40b0.dev\",\n },\n \"asyncpg\": {\n \"library\": \"asyncpg >= 0.12.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-asyncpg==0.40b0.dev\",\n },\n \"boto\": {\n \"library\": \"boto~=2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-boto==0.40b0.dev\",\n },\n \"boto3\": {\n \"library\": \"boto3 ~= 1.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-boto3sqs==0.40b0.dev\",\n },\n \"botocore\": {\n \"library\": \"botocore ~= 1.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-botocore==0.40b0.dev\",\n },\n \"celery\": {\n \"library\": \"celery >= 4.0, < 6.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-celery==0.40b0.dev\",\n },\n \"confluent-kafka\": {\n \"library\": \"confluent-kafka >= 1.8.2, < 2.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-confluent-kafka==0.40b0.dev\",\n },\n \"django\": {\n \"library\": \"django >= 1.10\",\n \"instrumentation\": \"opentelemetry-instrumentation-django==0.40b0.dev\",\n },\n \"elasticsearch\": {\n \"library\": \"elasticsearch >= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-elasticsearch==0.40b0.dev\",\n },\n \"falcon\": {\n \"library\": 
\"falcon >= 1.4.1, < 4.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-falcon==0.40b0.dev\",\n },\n \"fastapi\": {\n \"library\": \"fastapi ~= 0.58\",\n \"instrumentation\": \"opentelemetry-instrumentation-fastapi==0.40b0.dev\",\n },\n \"flask\": {\n \"library\": \"flask >= 1.0, < 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-flask==0.40b0.dev\",\n },\n \"grpcio\": {\n \"library\": \"grpcio ~= 1.27\",\n \"instrumentation\": \"opentelemetry-instrumentation-grpc==0.40b0.dev\",\n },\n \"httpx\": {\n \"library\": \"httpx >= 0.18.0, <= 0.23.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-httpx==0.40b0.dev\",\n },\n \"jinja2\": {\n \"library\": \"jinja2 >= 2.7, < 4.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-jinja2==0.40b0.dev\",\n },\n \"kafka-python\": {\n \"library\": \"kafka-python >= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-kafka-python==0.40b0.dev\",\n },\n \"mysql-connector-python\": {\n \"library\": \"mysql-connector-python ~= 8.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-mysql==0.40b0.dev\",\n },\n \"pika\": {\n \"library\": \"pika >= 0.12.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-pika==0.40b0.dev\",\n },\n \"psycopg2\": {\n \"library\": \"psycopg2 >= 2.7.3.1\",\n \"instrumentation\": \"opentelemetry-instrumentation-psycopg2==0.40b0.dev\",\n },\n \"pymemcache\": {\n \"library\": \"pymemcache >= 1.3.5, < 5\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymemcache==0.40b0.dev\",\n },\n \"pymongo\": {\n \"library\": \"pymongo >= 3.1, < 5.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymongo==0.40b0.dev\",\n },\n \"PyMySQL\": {\n \"library\": \"PyMySQL < 2\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymysql==0.40b0.dev\",\n },\n \"pyramid\": {\n \"library\": \"pyramid >= 1.7\",\n \"instrumentation\": \"opentelemetry-instrumentation-pyramid==0.40b0.dev\",\n },\n \"redis\": {\n \"library\": \"redis >= 2.6\",\n \"instrumentation\": \"opentelemetry-instrumentation-redis==0.40b0.dev\",\n },\n \"remoulade\": {\n \"library\": \"remoulade >= 0.50\",\n \"instrumentation\": \"opentelemetry-instrumentation-remoulade==0.40b0.dev\",\n },\n \"requests\": {\n \"library\": \"requests ~= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-requests==0.40b0.dev\",\n },\n \"scikit-learn\": {\n \"library\": \"scikit-learn ~= 0.24.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-sklearn==0.40b0.dev\",\n },\n \"sqlalchemy\": {\n \"library\": \"sqlalchemy\",\n \"instrumentation\": \"opentelemetry-instrumentation-sqlalchemy==0.40b0.dev\",\n },\n \"starlette\": {\n \"library\": \"starlette ~= 0.13.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-starlette==0.40b0.dev\",\n },\n \"psutil\": {\n \"library\": \"psutil >= 5\",\n \"instrumentation\": \"opentelemetry-instrumentation-system-metrics==0.40b0.dev\",\n },\n \"tornado\": {\n \"library\": \"tornado >= 5.1.1\",\n \"instrumentation\": \"opentelemetry-instrumentation-tornado==0.40b0.dev\",\n },\n \"tortoise-orm\": {\n \"library\": \"tortoise-orm >= 0.17.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-tortoiseorm==0.40b0.dev\",\n },\n \"pydantic\": {\n \"library\": \"pydantic >= 1.10.2\",\n \"instrumentation\": \"opentelemetry-instrumentation-tortoiseorm==0.40b0.dev\",\n },\n \"urllib3\": {\n \"library\": \"urllib3 >= 1.0.0, < 2.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-urllib3==0.40b0.dev\",\n },\n}\ndefault_instrumentations = [\n 
\"opentelemetry-instrumentation-aws-lambda==0.40b0.dev\",\n \"opentelemetry-instrumentation-dbapi==0.40b0.dev\",\n \"opentelemetry-instrumentation-logging==0.40b0.dev\",\n \"opentelemetry-instrumentation-sqlite3==0.40b0.dev\",\n \"opentelemetry-instrumentation-urllib==0.40b0.dev\",\n \"opentelemetry-instrumentation-wsgi==0.40b0.dev\",\n]\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py"}]}
| 3,190 | 194 |
gh_patches_debug_24865
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-10134
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Language: typo ("spech") in Natural Language samples
#### Environment details
N/A
#### Steps to reproduce
N/A
#### Code example
Comments say "Parts of spech" instead of "Parts of speech"
https://github.com/googleapis/google-cloud-python/blob/master/language/samples/v1/language_syntax_gcs.py#L67
https://github.com/googleapis/google-cloud-python/blob/master/language/samples/v1/language_syntax_text.py#L66
</issue>
<code>
[start of language/samples/v1/language_syntax_gcs.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2019 Google LLC
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # https://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 # DO NOT EDIT! This is a generated sample ("Request", "language_syntax_gcs")
18
19 # To install the latest published package dependency, execute the following:
20 # pip install google-cloud-language
21
22 # sample-metadata
23 # title: Analyzing Syntax (GCS)
24 # description: Analyzing Syntax in text file stored in Cloud Storage
25 # usage: python3 samples/v1/language_syntax_gcs.py [--gcs_content_uri "gs://cloud-samples-data/language/syntax-sentence.txt"]
26
27 # [START language_syntax_gcs]
28 from google.cloud import language_v1
29 from google.cloud.language_v1 import enums
30
31
32 def sample_analyze_syntax(gcs_content_uri):
33 """
34 Analyzing Syntax in text file stored in Cloud Storage
35
36 Args:
37 gcs_content_uri Google Cloud Storage URI where the file content is located.
38 e.g. gs://[Your Bucket]/[Path to File]
39 """
40
41 client = language_v1.LanguageServiceClient()
42
43 # gcs_content_uri = 'gs://cloud-samples-data/language/syntax-sentence.txt'
44
45 # Available types: PLAIN_TEXT, HTML
46 type_ = enums.Document.Type.PLAIN_TEXT
47
48 # Optional. If not specified, the language is automatically detected.
49 # For list of supported languages:
50 # https://cloud.google.com/natural-language/docs/languages
51 language = "en"
52 document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
53
54 # Available values: NONE, UTF8, UTF16, UTF32
55 encoding_type = enums.EncodingType.UTF8
56
57 response = client.analyze_syntax(document, encoding_type=encoding_type)
58 # Loop through tokens returned from the API
59 for token in response.tokens:
60 # Get the text content of this token. Usually a word or punctuation.
61 text = token.text
62 print(u"Token text: {}".format(text.content))
63 print(
64 u"Location of this token in overall document: {}".format(text.begin_offset)
65 )
66 # Get the part of speech information for this token.
67 # Parts of spech are as defined in:
68 # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf
69 part_of_speech = token.part_of_speech
70 # Get the tag, e.g. NOUN, ADJ for Adjective, et al.
71 print(
72 u"Part of Speech tag: {}".format(
73 enums.PartOfSpeech.Tag(part_of_speech.tag).name
74 )
75 )
76 # Get the voice, e.g. ACTIVE or PASSIVE
77 print(u"Voice: {}".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name))
78 # Get the tense, e.g. PAST, FUTURE, PRESENT, et al.
79 print(u"Tense: {}".format(enums.PartOfSpeech.Tense(part_of_speech.tense).name))
80 # See API reference for additional Part of Speech information available
81 # Get the lemma of the token. Wikipedia lemma description
82 # https://en.wikipedia.org/wiki/Lemma_(morphology)
83 print(u"Lemma: {}".format(token.lemma))
84 # Get the dependency tree parse information for this token.
85 # For more information on dependency labels:
86 # http://www.aclweb.org/anthology/P13-2017
87 dependency_edge = token.dependency_edge
88 print(u"Head token index: {}".format(dependency_edge.head_token_index))
89 print(
90 u"Label: {}".format(enums.DependencyEdge.Label(dependency_edge.label).name)
91 )
92
93 # Get the language of the text, which will be the same as
94 # the language specified in the request or, if not specified,
95 # the automatically-detected language.
96 print(u"Language of the text: {}".format(response.language))
97
98
99 # [END language_syntax_gcs]
100
101
102 def main():
103 import argparse
104
105 parser = argparse.ArgumentParser()
106 parser.add_argument(
107 "--gcs_content_uri",
108 type=str,
109 default="gs://cloud-samples-data/language/syntax-sentence.txt",
110 )
111 args = parser.parse_args()
112
113 sample_analyze_syntax(args.gcs_content_uri)
114
115
116 if __name__ == "__main__":
117 main()
118
[end of language/samples/v1/language_syntax_gcs.py]
[start of language/samples/v1/language_syntax_text.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2019 Google LLC
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # https://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 # DO NOT EDIT! This is a generated sample ("Request", "language_syntax_text")
18
19 # To install the latest published package dependency, execute the following:
20 # pip install google-cloud-language
21
22 # sample-metadata
23 # title: Analyzing Syntax
24 # description: Analyzing Syntax in a String
25 # usage: python3 samples/v1/language_syntax_text.py [--text_content "This is a short sentence."]
26
27 # [START language_syntax_text]
28 from google.cloud import language_v1
29 from google.cloud.language_v1 import enums
30
31
32 def sample_analyze_syntax(text_content):
33 """
34 Analyzing Syntax in a String
35
36 Args:
37 text_content The text content to analyze
38 """
39
40 client = language_v1.LanguageServiceClient()
41
42 # text_content = 'This is a short sentence.'
43
44 # Available types: PLAIN_TEXT, HTML
45 type_ = enums.Document.Type.PLAIN_TEXT
46
47 # Optional. If not specified, the language is automatically detected.
48 # For list of supported languages:
49 # https://cloud.google.com/natural-language/docs/languages
50 language = "en"
51 document = {"content": text_content, "type": type_, "language": language}
52
53 # Available values: NONE, UTF8, UTF16, UTF32
54 encoding_type = enums.EncodingType.UTF8
55
56 response = client.analyze_syntax(document, encoding_type=encoding_type)
57 # Loop through tokens returned from the API
58 for token in response.tokens:
59 # Get the text content of this token. Usually a word or punctuation.
60 text = token.text
61 print(u"Token text: {}".format(text.content))
62 print(
63 u"Location of this token in overall document: {}".format(text.begin_offset)
64 )
65 # Get the part of speech information for this token.
66 # Parts of spech are as defined in:
67 # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf
68 part_of_speech = token.part_of_speech
69 # Get the tag, e.g. NOUN, ADJ for Adjective, et al.
70 print(
71 u"Part of Speech tag: {}".format(
72 enums.PartOfSpeech.Tag(part_of_speech.tag).name
73 )
74 )
75 # Get the voice, e.g. ACTIVE or PASSIVE
76 print(u"Voice: {}".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name))
77 # Get the tense, e.g. PAST, FUTURE, PRESENT, et al.
78 print(u"Tense: {}".format(enums.PartOfSpeech.Tense(part_of_speech.tense).name))
79 # See API reference for additional Part of Speech information available
80 # Get the lemma of the token. Wikipedia lemma description
81 # https://en.wikipedia.org/wiki/Lemma_(morphology)
82 print(u"Lemma: {}".format(token.lemma))
83 # Get the dependency tree parse information for this token.
84 # For more information on dependency labels:
85 # http://www.aclweb.org/anthology/P13-2017
86 dependency_edge = token.dependency_edge
87 print(u"Head token index: {}".format(dependency_edge.head_token_index))
88 print(
89 u"Label: {}".format(enums.DependencyEdge.Label(dependency_edge.label).name)
90 )
91
92 # Get the language of the text, which will be the same as
93 # the language specified in the request or, if not specified,
94 # the automatically-detected language.
95 print(u"Language of the text: {}".format(response.language))
96
97
98 # [END language_syntax_text]
99
100
101 def main():
102 import argparse
103
104 parser = argparse.ArgumentParser()
105 parser.add_argument("--text_content", type=str, default="This is a short sentence.")
106 args = parser.parse_args()
107
108 sample_analyze_syntax(args.text_content)
109
110
111 if __name__ == "__main__":
112 main()
113
[end of language/samples/v1/language_syntax_text.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/language/samples/v1/language_syntax_gcs.py b/language/samples/v1/language_syntax_gcs.py
--- a/language/samples/v1/language_syntax_gcs.py
+++ b/language/samples/v1/language_syntax_gcs.py
@@ -64,7 +64,7 @@
u"Location of this token in overall document: {}".format(text.begin_offset)
)
# Get the part of speech information for this token.
- # Parts of spech are as defined in:
+ # Parts of speech are as defined in:
# http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf
part_of_speech = token.part_of_speech
# Get the tag, e.g. NOUN, ADJ for Adjective, et al.
diff --git a/language/samples/v1/language_syntax_text.py b/language/samples/v1/language_syntax_text.py
--- a/language/samples/v1/language_syntax_text.py
+++ b/language/samples/v1/language_syntax_text.py
@@ -63,7 +63,7 @@
u"Location of this token in overall document: {}".format(text.begin_offset)
)
# Get the part of speech information for this token.
- # Parts of spech are as defined in:
+ # Parts of speech are as defined in:
# http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf
part_of_speech = token.part_of_speech
# Get the tag, e.g. NOUN, ADJ for Adjective, et al.
|
{"golden_diff": "diff --git a/language/samples/v1/language_syntax_gcs.py b/language/samples/v1/language_syntax_gcs.py\n--- a/language/samples/v1/language_syntax_gcs.py\n+++ b/language/samples/v1/language_syntax_gcs.py\n@@ -64,7 +64,7 @@\n u\"Location of this token in overall document: {}\".format(text.begin_offset)\n )\n # Get the part of speech information for this token.\n- # Parts of spech are as defined in:\n+ # Parts of speech are as defined in:\n # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf\n part_of_speech = token.part_of_speech\n # Get the tag, e.g. NOUN, ADJ for Adjective, et al.\ndiff --git a/language/samples/v1/language_syntax_text.py b/language/samples/v1/language_syntax_text.py\n--- a/language/samples/v1/language_syntax_text.py\n+++ b/language/samples/v1/language_syntax_text.py\n@@ -63,7 +63,7 @@\n u\"Location of this token in overall document: {}\".format(text.begin_offset)\n )\n # Get the part of speech information for this token.\n- # Parts of spech are as defined in:\n+ # Parts of speech are as defined in:\n # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf\n part_of_speech = token.part_of_speech\n # Get the tag, e.g. NOUN, ADJ for Adjective, et al.\n", "issue": "Language: typo (\"spech\") in Natural Language samples\n#### Environment details\r\n\r\nN/A\r\n\r\n#### Steps to reproduce\r\n\r\nN/A\r\n\r\n#### Code example\r\n\r\nComments say \"Parts of spech\" instead of \"Parts of speech\"\r\nhttps://github.com/googleapis/google-cloud-python/blob/master/language/samples/v1/language_syntax_gcs.py#L67\r\nhttps://github.com/googleapis/google-cloud-python/blob/master/language/samples/v1/language_syntax_text.py#L66\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# DO NOT EDIT! This is a generated sample (\"Request\", \"language_syntax_gcs\")\n\n# To install the latest published package dependency, execute the following:\n# pip install google-cloud-language\n\n# sample-metadata\n# title: Analyzing Syntax (GCS)\n# description: Analyzing Syntax in text file stored in Cloud Storage\n# usage: python3 samples/v1/language_syntax_gcs.py [--gcs_content_uri \"gs://cloud-samples-data/language/syntax-sentence.txt\"]\n\n# [START language_syntax_gcs]\nfrom google.cloud import language_v1\nfrom google.cloud.language_v1 import enums\n\n\ndef sample_analyze_syntax(gcs_content_uri):\n \"\"\"\n Analyzing Syntax in text file stored in Cloud Storage\n\n Args:\n gcs_content_uri Google Cloud Storage URI where the file content is located.\n e.g. gs://[Your Bucket]/[Path to File]\n \"\"\"\n\n client = language_v1.LanguageServiceClient()\n\n # gcs_content_uri = 'gs://cloud-samples-data/language/syntax-sentence.txt'\n\n # Available types: PLAIN_TEXT, HTML\n type_ = enums.Document.Type.PLAIN_TEXT\n\n # Optional. 
If not specified, the language is automatically detected.\n # For list of supported languages:\n # https://cloud.google.com/natural-language/docs/languages\n language = \"en\"\n document = {\"gcs_content_uri\": gcs_content_uri, \"type\": type_, \"language\": language}\n\n # Available values: NONE, UTF8, UTF16, UTF32\n encoding_type = enums.EncodingType.UTF8\n\n response = client.analyze_syntax(document, encoding_type=encoding_type)\n # Loop through tokens returned from the API\n for token in response.tokens:\n # Get the text content of this token. Usually a word or punctuation.\n text = token.text\n print(u\"Token text: {}\".format(text.content))\n print(\n u\"Location of this token in overall document: {}\".format(text.begin_offset)\n )\n # Get the part of speech information for this token.\n # Parts of spech are as defined in:\n # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf\n part_of_speech = token.part_of_speech\n # Get the tag, e.g. NOUN, ADJ for Adjective, et al.\n print(\n u\"Part of Speech tag: {}\".format(\n enums.PartOfSpeech.Tag(part_of_speech.tag).name\n )\n )\n # Get the voice, e.g. ACTIVE or PASSIVE\n print(u\"Voice: {}\".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name))\n # Get the tense, e.g. PAST, FUTURE, PRESENT, et al.\n print(u\"Tense: {}\".format(enums.PartOfSpeech.Tense(part_of_speech.tense).name))\n # See API reference for additional Part of Speech information available\n # Get the lemma of the token. Wikipedia lemma description\n # https://en.wikipedia.org/wiki/Lemma_(morphology)\n print(u\"Lemma: {}\".format(token.lemma))\n # Get the dependency tree parse information for this token.\n # For more information on dependency labels:\n # http://www.aclweb.org/anthology/P13-2017\n dependency_edge = token.dependency_edge\n print(u\"Head token index: {}\".format(dependency_edge.head_token_index))\n print(\n u\"Label: {}\".format(enums.DependencyEdge.Label(dependency_edge.label).name)\n )\n\n # Get the language of the text, which will be the same as\n # the language specified in the request or, if not specified,\n # the automatically-detected language.\n print(u\"Language of the text: {}\".format(response.language))\n\n\n# [END language_syntax_gcs]\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--gcs_content_uri\",\n type=str,\n default=\"gs://cloud-samples-data/language/syntax-sentence.txt\",\n )\n args = parser.parse_args()\n\n sample_analyze_syntax(args.gcs_content_uri)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "language/samples/v1/language_syntax_gcs.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# DO NOT EDIT! 
This is a generated sample (\"Request\", \"language_syntax_text\")\n\n# To install the latest published package dependency, execute the following:\n# pip install google-cloud-language\n\n# sample-metadata\n# title: Analyzing Syntax\n# description: Analyzing Syntax in a String\n# usage: python3 samples/v1/language_syntax_text.py [--text_content \"This is a short sentence.\"]\n\n# [START language_syntax_text]\nfrom google.cloud import language_v1\nfrom google.cloud.language_v1 import enums\n\n\ndef sample_analyze_syntax(text_content):\n \"\"\"\n Analyzing Syntax in a String\n\n Args:\n text_content The text content to analyze\n \"\"\"\n\n client = language_v1.LanguageServiceClient()\n\n # text_content = 'This is a short sentence.'\n\n # Available types: PLAIN_TEXT, HTML\n type_ = enums.Document.Type.PLAIN_TEXT\n\n # Optional. If not specified, the language is automatically detected.\n # For list of supported languages:\n # https://cloud.google.com/natural-language/docs/languages\n language = \"en\"\n document = {\"content\": text_content, \"type\": type_, \"language\": language}\n\n # Available values: NONE, UTF8, UTF16, UTF32\n encoding_type = enums.EncodingType.UTF8\n\n response = client.analyze_syntax(document, encoding_type=encoding_type)\n # Loop through tokens returned from the API\n for token in response.tokens:\n # Get the text content of this token. Usually a word or punctuation.\n text = token.text\n print(u\"Token text: {}\".format(text.content))\n print(\n u\"Location of this token in overall document: {}\".format(text.begin_offset)\n )\n # Get the part of speech information for this token.\n # Parts of spech are as defined in:\n # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf\n part_of_speech = token.part_of_speech\n # Get the tag, e.g. NOUN, ADJ for Adjective, et al.\n print(\n u\"Part of Speech tag: {}\".format(\n enums.PartOfSpeech.Tag(part_of_speech.tag).name\n )\n )\n # Get the voice, e.g. ACTIVE or PASSIVE\n print(u\"Voice: {}\".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name))\n # Get the tense, e.g. PAST, FUTURE, PRESENT, et al.\n print(u\"Tense: {}\".format(enums.PartOfSpeech.Tense(part_of_speech.tense).name))\n # See API reference for additional Part of Speech information available\n # Get the lemma of the token. Wikipedia lemma description\n # https://en.wikipedia.org/wiki/Lemma_(morphology)\n print(u\"Lemma: {}\".format(token.lemma))\n # Get the dependency tree parse information for this token.\n # For more information on dependency labels:\n # http://www.aclweb.org/anthology/P13-2017\n dependency_edge = token.dependency_edge\n print(u\"Head token index: {}\".format(dependency_edge.head_token_index))\n print(\n u\"Label: {}\".format(enums.DependencyEdge.Label(dependency_edge.label).name)\n )\n\n # Get the language of the text, which will be the same as\n # the language specified in the request or, if not specified,\n # the automatically-detected language.\n print(u\"Language of the text: {}\".format(response.language))\n\n\n# [END language_syntax_text]\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--text_content\", type=str, default=\"This is a short sentence.\")\n args = parser.parse_args()\n\n sample_analyze_syntax(args.text_content)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "language/samples/v1/language_syntax_text.py"}]}
| 3,189 | 352 |
gh_patches_debug_10108
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-4074
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support pathlib.Path in FEED_URI
Make things work the same when the value assigned to the `FEED_URI` setting is a string containing a path or an instance of `pathlib.Path`.
</issue>
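A minimal sketch of the behaviour being requested, using a hypothetical helper (`normalize_feed_uri` is an assumed name, not part of Scrapy): coerce the configured value to text once, so a plain string and a `pathlib.Path` are treated identically by everything downstream (URI scheme detection, `%`-style parameter substitution).

```python
from pathlib import Path


def normalize_feed_uri(value):
    # Accept either a str or a pathlib.Path; hand a plain string to the
    # code that parses the scheme and applies %-style URI parameters.
    return str(value) if isinstance(value, Path) else value


assert normalize_feed_uri("items-%(time)s.json") == "items-%(time)s.json"
assert normalize_feed_uri(Path("items.json")) == "items.json"
```

Coercing at this single boundary keeps the rest of the feed-export code (e.g. the `urlparse` calls and the URI formatting) unchanged.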
<code>
[start of scrapy/extensions/feedexport.py]
1 """
2 Feed Exports extension
3
4 See documentation in docs/topics/feed-exports.rst
5 """
6
7 import os
8 import sys
9 import logging
10 import posixpath
11 from tempfile import NamedTemporaryFile
12 from datetime import datetime
13 import six
14 from six.moves.urllib.parse import urlparse, unquote
15 from ftplib import FTP
16
17 from zope.interface import Interface, implementer
18 from twisted.internet import defer, threads
19 from w3lib.url import file_uri_to_path
20
21 from scrapy import signals
22 from scrapy.utils.ftp import ftp_makedirs_cwd
23 from scrapy.exceptions import NotConfigured
24 from scrapy.utils.misc import create_instance, load_object
25 from scrapy.utils.log import failure_to_exc_info
26 from scrapy.utils.python import without_none_values
27 from scrapy.utils.boto import is_botocore
28
29 logger = logging.getLogger(__name__)
30
31
32 class IFeedStorage(Interface):
33 """Interface that all Feed Storages must implement"""
34
35 def __init__(uri):
36 """Initialize the storage with the parameters given in the URI"""
37
38 def open(spider):
39 """Open the storage for the given spider. It must return a file-like
40 object that will be used for the exporters"""
41
42 def store(file):
43 """Store the given file stream"""
44
45
46 @implementer(IFeedStorage)
47 class BlockingFeedStorage(object):
48
49 def open(self, spider):
50 path = spider.crawler.settings['FEED_TEMPDIR']
51 if path and not os.path.isdir(path):
52 raise OSError('Not a Directory: ' + str(path))
53
54 return NamedTemporaryFile(prefix='feed-', dir=path)
55
56 def store(self, file):
57 return threads.deferToThread(self._store_in_thread, file)
58
59 def _store_in_thread(self, file):
60 raise NotImplementedError
61
62
63 @implementer(IFeedStorage)
64 class StdoutFeedStorage(object):
65
66 def __init__(self, uri, _stdout=None):
67 if not _stdout:
68 _stdout = sys.stdout if six.PY2 else sys.stdout.buffer
69 self._stdout = _stdout
70
71 def open(self, spider):
72 return self._stdout
73
74 def store(self, file):
75 pass
76
77
78 @implementer(IFeedStorage)
79 class FileFeedStorage(object):
80
81 def __init__(self, uri):
82 self.path = file_uri_to_path(uri)
83
84 def open(self, spider):
85 dirname = os.path.dirname(self.path)
86 if dirname and not os.path.exists(dirname):
87 os.makedirs(dirname)
88 return open(self.path, 'ab')
89
90 def store(self, file):
91 file.close()
92
93
94 class S3FeedStorage(BlockingFeedStorage):
95
96 def __init__(self, uri, access_key=None, secret_key=None, acl=None):
97 # BEGIN Backward compatibility for initialising without keys (and
98 # without using from_crawler)
99 no_defaults = access_key is None and secret_key is None
100 if no_defaults:
101 from scrapy.utils.project import get_project_settings
102 settings = get_project_settings()
103 if 'AWS_ACCESS_KEY_ID' in settings or 'AWS_SECRET_ACCESS_KEY' in settings:
104 import warnings
105 from scrapy.exceptions import ScrapyDeprecationWarning
106 warnings.warn(
107 "Initialising `scrapy.extensions.feedexport.S3FeedStorage` "
108 "without AWS keys is deprecated. Please supply credentials or "
109 "use the `from_crawler()` constructor.",
110 category=ScrapyDeprecationWarning,
111 stacklevel=2
112 )
113 access_key = settings['AWS_ACCESS_KEY_ID']
114 secret_key = settings['AWS_SECRET_ACCESS_KEY']
115 # END Backward compatibility
116 u = urlparse(uri)
117 self.bucketname = u.hostname
118 self.access_key = u.username or access_key
119 self.secret_key = u.password or secret_key
120 self.is_botocore = is_botocore()
121 self.keyname = u.path[1:] # remove first "/"
122 self.acl = acl
123 if self.is_botocore:
124 import botocore.session
125 session = botocore.session.get_session()
126 self.s3_client = session.create_client(
127 's3', aws_access_key_id=self.access_key,
128 aws_secret_access_key=self.secret_key)
129 else:
130 import boto
131 self.connect_s3 = boto.connect_s3
132
133 @classmethod
134 def from_crawler(cls, crawler, uri):
135 return cls(
136 uri=uri,
137 access_key=crawler.settings['AWS_ACCESS_KEY_ID'],
138 secret_key=crawler.settings['AWS_SECRET_ACCESS_KEY'],
139 acl=crawler.settings['FEED_STORAGE_S3_ACL'] or None
140 )
141
142 def _store_in_thread(self, file):
143 file.seek(0)
144 if self.is_botocore:
145 kwargs = {'ACL': self.acl} if self.acl else {}
146 self.s3_client.put_object(
147 Bucket=self.bucketname, Key=self.keyname, Body=file,
148 **kwargs)
149 else:
150 conn = self.connect_s3(self.access_key, self.secret_key)
151 bucket = conn.get_bucket(self.bucketname, validate=False)
152 key = bucket.new_key(self.keyname)
153 kwargs = {'policy': self.acl} if self.acl else {}
154 key.set_contents_from_file(file, **kwargs)
155 key.close()
156
157
158 class FTPFeedStorage(BlockingFeedStorage):
159
160 def __init__(self, uri, use_active_mode=False):
161 u = urlparse(uri)
162 self.host = u.hostname
163 self.port = int(u.port or '21')
164 self.username = u.username
165 self.password = unquote(u.password)
166 self.path = u.path
167 self.use_active_mode = use_active_mode
168
169 @classmethod
170 def from_crawler(cls, crawler, uri):
171 return cls(
172 uri=uri,
173 use_active_mode=crawler.settings.getbool('FEED_STORAGE_FTP_ACTIVE')
174 )
175
176 def _store_in_thread(self, file):
177 file.seek(0)
178 ftp = FTP()
179 ftp.connect(self.host, self.port)
180 ftp.login(self.username, self.password)
181 if self.use_active_mode:
182 ftp.set_pasv(False)
183 dirname, filename = posixpath.split(self.path)
184 ftp_makedirs_cwd(ftp, dirname)
185 ftp.storbinary('STOR %s' % filename, file)
186 ftp.quit()
187
188
189 class SpiderSlot(object):
190 def __init__(self, file, exporter, storage, uri):
191 self.file = file
192 self.exporter = exporter
193 self.storage = storage
194 self.uri = uri
195 self.itemcount = 0
196
197
198 class FeedExporter(object):
199
200 def __init__(self, settings):
201 self.settings = settings
202 self.urifmt = settings['FEED_URI']
203 if not self.urifmt:
204 raise NotConfigured
205 self.format = settings['FEED_FORMAT'].lower()
206 self.export_encoding = settings['FEED_EXPORT_ENCODING']
207 self.storages = self._load_components('FEED_STORAGES')
208 self.exporters = self._load_components('FEED_EXPORTERS')
209 if not self._storage_supported(self.urifmt):
210 raise NotConfigured
211 if not self._exporter_supported(self.format):
212 raise NotConfigured
213 self.store_empty = settings.getbool('FEED_STORE_EMPTY')
214 self._exporting = False
215 self.export_fields = settings.getlist('FEED_EXPORT_FIELDS') or None
216 self.indent = None
217 if settings.get('FEED_EXPORT_INDENT') is not None:
218 self.indent = settings.getint('FEED_EXPORT_INDENT')
219 uripar = settings['FEED_URI_PARAMS']
220 self._uripar = load_object(uripar) if uripar else lambda x, y: None
221
222 @classmethod
223 def from_crawler(cls, crawler):
224 o = cls(crawler.settings)
225 o.crawler = crawler
226 crawler.signals.connect(o.open_spider, signals.spider_opened)
227 crawler.signals.connect(o.close_spider, signals.spider_closed)
228 crawler.signals.connect(o.item_scraped, signals.item_scraped)
229 return o
230
231 def open_spider(self, spider):
232 uri = self.urifmt % self._get_uri_params(spider)
233 storage = self._get_storage(uri)
234 file = storage.open(spider)
235 exporter = self._get_exporter(file, fields_to_export=self.export_fields,
236 encoding=self.export_encoding, indent=self.indent)
237 if self.store_empty:
238 exporter.start_exporting()
239 self._exporting = True
240 self.slot = SpiderSlot(file, exporter, storage, uri)
241
242 def close_spider(self, spider):
243 slot = self.slot
244 if not slot.itemcount and not self.store_empty:
245 # We need to call slot.storage.store nonetheless to get the file
246 # properly closed.
247 return defer.maybeDeferred(slot.storage.store, slot.file)
248 if self._exporting:
249 slot.exporter.finish_exporting()
250 self._exporting = False
251 logfmt = "%s %%(format)s feed (%%(itemcount)d items) in: %%(uri)s"
252 log_args = {'format': self.format,
253 'itemcount': slot.itemcount,
254 'uri': slot.uri}
255 d = defer.maybeDeferred(slot.storage.store, slot.file)
256 d.addCallback(lambda _: logger.info(logfmt % "Stored", log_args,
257 extra={'spider': spider}))
258 d.addErrback(lambda f: logger.error(logfmt % "Error storing", log_args,
259 exc_info=failure_to_exc_info(f),
260 extra={'spider': spider}))
261 return d
262
263 def item_scraped(self, item, spider):
264 slot = self.slot
265 if not self._exporting:
266 slot.exporter.start_exporting()
267 self._exporting = True
268 slot.exporter.export_item(item)
269 slot.itemcount += 1
270 return item
271
272 def _load_components(self, setting_prefix):
273 conf = without_none_values(self.settings.getwithbase(setting_prefix))
274 d = {}
275 for k, v in conf.items():
276 try:
277 d[k] = load_object(v)
278 except NotConfigured:
279 pass
280 return d
281
282 def _exporter_supported(self, format):
283 if format in self.exporters:
284 return True
285 logger.error("Unknown feed format: %(format)s", {'format': format})
286
287 def _storage_supported(self, uri):
288 scheme = urlparse(uri).scheme
289 if scheme in self.storages:
290 try:
291 self._get_storage(uri)
292 return True
293 except NotConfigured as e:
294 logger.error("Disabled feed storage scheme: %(scheme)s. "
295 "Reason: %(reason)s",
296 {'scheme': scheme, 'reason': str(e)})
297 else:
298 logger.error("Unknown feed storage scheme: %(scheme)s",
299 {'scheme': scheme})
300
301 def _get_instance(self, objcls, *args, **kwargs):
302 return create_instance(
303 objcls, self.settings, getattr(self, 'crawler', None),
304 *args, **kwargs)
305
306 def _get_exporter(self, *args, **kwargs):
307 return self._get_instance(self.exporters[self.format], *args, **kwargs)
308
309 def _get_storage(self, uri):
310 return self._get_instance(self.storages[urlparse(uri).scheme], uri)
311
312 def _get_uri_params(self, spider):
313 params = {}
314 for k in dir(spider):
315 params[k] = getattr(spider, k)
316 ts = datetime.utcnow().replace(microsecond=0).isoformat().replace(':', '-')
317 params['time'] = ts
318 self._uripar(params, spider)
319 return params
320
[end of scrapy/extensions/feedexport.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scrapy/extensions/feedexport.py b/scrapy/extensions/feedexport.py
--- a/scrapy/extensions/feedexport.py
+++ b/scrapy/extensions/feedexport.py
@@ -199,9 +199,9 @@
def __init__(self, settings):
self.settings = settings
- self.urifmt = settings['FEED_URI']
- if not self.urifmt:
+ if not settings['FEED_URI']:
raise NotConfigured
+ self.urifmt = str(settings['FEED_URI'])
self.format = settings['FEED_FORMAT'].lower()
self.export_encoding = settings['FEED_EXPORT_ENCODING']
self.storages = self._load_components('FEED_STORAGES')
|
{"golden_diff": "diff --git a/scrapy/extensions/feedexport.py b/scrapy/extensions/feedexport.py\n--- a/scrapy/extensions/feedexport.py\n+++ b/scrapy/extensions/feedexport.py\n@@ -199,9 +199,9 @@\n \n def __init__(self, settings):\n self.settings = settings\n- self.urifmt = settings['FEED_URI']\n- if not self.urifmt:\n+ if not settings['FEED_URI']:\n raise NotConfigured\n+ self.urifmt = str(settings['FEED_URI'])\n self.format = settings['FEED_FORMAT'].lower()\n self.export_encoding = settings['FEED_EXPORT_ENCODING']\n self.storages = self._load_components('FEED_STORAGES')\n", "issue": "Support pathlib.Path in FEED_URI\nMake things work the same when the value assigned to the `FEED_URI` setting is a string containing a path or an instance of `pathlib.Path`.\n", "before_files": [{"content": "\"\"\"\nFeed Exports extension\n\nSee documentation in docs/topics/feed-exports.rst\n\"\"\"\n\nimport os\nimport sys\nimport logging\nimport posixpath\nfrom tempfile import NamedTemporaryFile\nfrom datetime import datetime\nimport six\nfrom six.moves.urllib.parse import urlparse, unquote\nfrom ftplib import FTP\n\nfrom zope.interface import Interface, implementer\nfrom twisted.internet import defer, threads\nfrom w3lib.url import file_uri_to_path\n\nfrom scrapy import signals\nfrom scrapy.utils.ftp import ftp_makedirs_cwd\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.utils.misc import create_instance, load_object\nfrom scrapy.utils.log import failure_to_exc_info\nfrom scrapy.utils.python import without_none_values\nfrom scrapy.utils.boto import is_botocore\n\nlogger = logging.getLogger(__name__)\n\n\nclass IFeedStorage(Interface):\n \"\"\"Interface that all Feed Storages must implement\"\"\"\n\n def __init__(uri):\n \"\"\"Initialize the storage with the parameters given in the URI\"\"\"\n\n def open(spider):\n \"\"\"Open the storage for the given spider. 
It must return a file-like\n object that will be used for the exporters\"\"\"\n\n def store(file):\n \"\"\"Store the given file stream\"\"\"\n\n\n@implementer(IFeedStorage)\nclass BlockingFeedStorage(object):\n\n def open(self, spider):\n path = spider.crawler.settings['FEED_TEMPDIR']\n if path and not os.path.isdir(path):\n raise OSError('Not a Directory: ' + str(path))\n\n return NamedTemporaryFile(prefix='feed-', dir=path)\n\n def store(self, file):\n return threads.deferToThread(self._store_in_thread, file)\n\n def _store_in_thread(self, file):\n raise NotImplementedError\n\n\n@implementer(IFeedStorage)\nclass StdoutFeedStorage(object):\n\n def __init__(self, uri, _stdout=None):\n if not _stdout:\n _stdout = sys.stdout if six.PY2 else sys.stdout.buffer\n self._stdout = _stdout\n\n def open(self, spider):\n return self._stdout\n\n def store(self, file):\n pass\n\n\n@implementer(IFeedStorage)\nclass FileFeedStorage(object):\n\n def __init__(self, uri):\n self.path = file_uri_to_path(uri)\n\n def open(self, spider):\n dirname = os.path.dirname(self.path)\n if dirname and not os.path.exists(dirname):\n os.makedirs(dirname)\n return open(self.path, 'ab')\n\n def store(self, file):\n file.close()\n\n\nclass S3FeedStorage(BlockingFeedStorage):\n\n def __init__(self, uri, access_key=None, secret_key=None, acl=None):\n # BEGIN Backward compatibility for initialising without keys (and\n # without using from_crawler)\n no_defaults = access_key is None and secret_key is None\n if no_defaults:\n from scrapy.utils.project import get_project_settings\n settings = get_project_settings()\n if 'AWS_ACCESS_KEY_ID' in settings or 'AWS_SECRET_ACCESS_KEY' in settings:\n import warnings\n from scrapy.exceptions import ScrapyDeprecationWarning\n warnings.warn(\n \"Initialising `scrapy.extensions.feedexport.S3FeedStorage` \"\n \"without AWS keys is deprecated. 
Please supply credentials or \"\n \"use the `from_crawler()` constructor.\",\n category=ScrapyDeprecationWarning,\n stacklevel=2\n )\n access_key = settings['AWS_ACCESS_KEY_ID']\n secret_key = settings['AWS_SECRET_ACCESS_KEY']\n # END Backward compatibility\n u = urlparse(uri)\n self.bucketname = u.hostname\n self.access_key = u.username or access_key\n self.secret_key = u.password or secret_key\n self.is_botocore = is_botocore()\n self.keyname = u.path[1:] # remove first \"/\"\n self.acl = acl\n if self.is_botocore:\n import botocore.session\n session = botocore.session.get_session()\n self.s3_client = session.create_client(\n 's3', aws_access_key_id=self.access_key,\n aws_secret_access_key=self.secret_key)\n else:\n import boto\n self.connect_s3 = boto.connect_s3\n\n @classmethod\n def from_crawler(cls, crawler, uri):\n return cls(\n uri=uri,\n access_key=crawler.settings['AWS_ACCESS_KEY_ID'],\n secret_key=crawler.settings['AWS_SECRET_ACCESS_KEY'],\n acl=crawler.settings['FEED_STORAGE_S3_ACL'] or None\n )\n\n def _store_in_thread(self, file):\n file.seek(0)\n if self.is_botocore:\n kwargs = {'ACL': self.acl} if self.acl else {}\n self.s3_client.put_object(\n Bucket=self.bucketname, Key=self.keyname, Body=file,\n **kwargs)\n else:\n conn = self.connect_s3(self.access_key, self.secret_key)\n bucket = conn.get_bucket(self.bucketname, validate=False)\n key = bucket.new_key(self.keyname)\n kwargs = {'policy': self.acl} if self.acl else {}\n key.set_contents_from_file(file, **kwargs)\n key.close()\n\n\nclass FTPFeedStorage(BlockingFeedStorage):\n\n def __init__(self, uri, use_active_mode=False):\n u = urlparse(uri)\n self.host = u.hostname\n self.port = int(u.port or '21')\n self.username = u.username\n self.password = unquote(u.password)\n self.path = u.path\n self.use_active_mode = use_active_mode\n\n @classmethod\n def from_crawler(cls, crawler, uri):\n return cls(\n uri=uri,\n use_active_mode=crawler.settings.getbool('FEED_STORAGE_FTP_ACTIVE')\n )\n\n def _store_in_thread(self, file):\n file.seek(0)\n ftp = FTP()\n ftp.connect(self.host, self.port)\n ftp.login(self.username, self.password)\n if self.use_active_mode:\n ftp.set_pasv(False)\n dirname, filename = posixpath.split(self.path)\n ftp_makedirs_cwd(ftp, dirname)\n ftp.storbinary('STOR %s' % filename, file)\n ftp.quit()\n\n\nclass SpiderSlot(object):\n def __init__(self, file, exporter, storage, uri):\n self.file = file\n self.exporter = exporter\n self.storage = storage\n self.uri = uri\n self.itemcount = 0\n\n\nclass FeedExporter(object):\n\n def __init__(self, settings):\n self.settings = settings\n self.urifmt = settings['FEED_URI']\n if not self.urifmt:\n raise NotConfigured\n self.format = settings['FEED_FORMAT'].lower()\n self.export_encoding = settings['FEED_EXPORT_ENCODING']\n self.storages = self._load_components('FEED_STORAGES')\n self.exporters = self._load_components('FEED_EXPORTERS')\n if not self._storage_supported(self.urifmt):\n raise NotConfigured\n if not self._exporter_supported(self.format):\n raise NotConfigured\n self.store_empty = settings.getbool('FEED_STORE_EMPTY')\n self._exporting = False\n self.export_fields = settings.getlist('FEED_EXPORT_FIELDS') or None\n self.indent = None\n if settings.get('FEED_EXPORT_INDENT') is not None:\n self.indent = settings.getint('FEED_EXPORT_INDENT')\n uripar = settings['FEED_URI_PARAMS']\n self._uripar = load_object(uripar) if uripar else lambda x, y: None\n\n @classmethod\n def from_crawler(cls, crawler):\n o = cls(crawler.settings)\n o.crawler = crawler\n 
crawler.signals.connect(o.open_spider, signals.spider_opened)\n crawler.signals.connect(o.close_spider, signals.spider_closed)\n crawler.signals.connect(o.item_scraped, signals.item_scraped)\n return o\n\n def open_spider(self, spider):\n uri = self.urifmt % self._get_uri_params(spider)\n storage = self._get_storage(uri)\n file = storage.open(spider)\n exporter = self._get_exporter(file, fields_to_export=self.export_fields,\n encoding=self.export_encoding, indent=self.indent)\n if self.store_empty:\n exporter.start_exporting()\n self._exporting = True\n self.slot = SpiderSlot(file, exporter, storage, uri)\n\n def close_spider(self, spider):\n slot = self.slot\n if not slot.itemcount and not self.store_empty:\n # We need to call slot.storage.store nonetheless to get the file\n # properly closed.\n return defer.maybeDeferred(slot.storage.store, slot.file)\n if self._exporting:\n slot.exporter.finish_exporting()\n self._exporting = False\n logfmt = \"%s %%(format)s feed (%%(itemcount)d items) in: %%(uri)s\"\n log_args = {'format': self.format,\n 'itemcount': slot.itemcount,\n 'uri': slot.uri}\n d = defer.maybeDeferred(slot.storage.store, slot.file)\n d.addCallback(lambda _: logger.info(logfmt % \"Stored\", log_args,\n extra={'spider': spider}))\n d.addErrback(lambda f: logger.error(logfmt % \"Error storing\", log_args,\n exc_info=failure_to_exc_info(f),\n extra={'spider': spider}))\n return d\n\n def item_scraped(self, item, spider):\n slot = self.slot\n if not self._exporting:\n slot.exporter.start_exporting()\n self._exporting = True\n slot.exporter.export_item(item)\n slot.itemcount += 1\n return item\n\n def _load_components(self, setting_prefix):\n conf = without_none_values(self.settings.getwithbase(setting_prefix))\n d = {}\n for k, v in conf.items():\n try:\n d[k] = load_object(v)\n except NotConfigured:\n pass\n return d\n\n def _exporter_supported(self, format):\n if format in self.exporters:\n return True\n logger.error(\"Unknown feed format: %(format)s\", {'format': format})\n\n def _storage_supported(self, uri):\n scheme = urlparse(uri).scheme\n if scheme in self.storages:\n try:\n self._get_storage(uri)\n return True\n except NotConfigured as e:\n logger.error(\"Disabled feed storage scheme: %(scheme)s. \"\n \"Reason: %(reason)s\",\n {'scheme': scheme, 'reason': str(e)})\n else:\n logger.error(\"Unknown feed storage scheme: %(scheme)s\",\n {'scheme': scheme})\n\n def _get_instance(self, objcls, *args, **kwargs):\n return create_instance(\n objcls, self.settings, getattr(self, 'crawler', None),\n *args, **kwargs)\n\n def _get_exporter(self, *args, **kwargs):\n return self._get_instance(self.exporters[self.format], *args, **kwargs)\n\n def _get_storage(self, uri):\n return self._get_instance(self.storages[urlparse(uri).scheme], uri)\n\n def _get_uri_params(self, spider):\n params = {}\n for k in dir(spider):\n params[k] = getattr(spider, k)\n ts = datetime.utcnow().replace(microsecond=0).isoformat().replace(':', '-')\n params['time'] = ts\n self._uripar(params, spider)\n return params\n", "path": "scrapy/extensions/feedexport.py"}]}
| 3,919 | 159 |
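The scrapy record above quotes the full `scrapy/extensions/feedexport.py`, whose storage backends all follow the same two-method interface: `open(spider)` returns a file-like object for the exporter to write into, and `store(file)` persists it, with `BlockingFeedStorage` deferring the blocking part to a thread via `_store_in_thread`. Below is a minimal sketch of a custom backend written against that interface; the `CopyFeedStorage` name, the `copy://` scheme and the registration snippet are illustrative and not part of the record.

```python
from scrapy.extensions.feedexport import BlockingFeedStorage


class CopyFeedStorage(BlockingFeedStorage):
    """Illustrative backend: copy the finished feed to a local path."""

    def __init__(self, uri):
        # e.g. uri = "copy:///tmp/items.jl" -> path = "/tmp/items.jl"
        self.path = uri.split("://", 1)[1]

    def _store_in_thread(self, file):
        # store() wraps this call in threads.deferToThread, so blocking
        # file I/O here does not stall the Twisted reactor.
        file.seek(0)
        with open(self.path, "wb") as dest:
            dest.write(file.read())


# Registration would go through the FEED_STORAGES setting that
# FeedExporter._load_components('FEED_STORAGES') reads, for example:
# FEED_STORAGES = {"copy": "myproject.feedstorage.CopyFeedStorage"}
```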
gh_patches_debug_6553
|
rasdani/github-patches
|
git_diff
|
celery__kombu-821
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Timer.__iter__'s docstring is error.
``kombu.async.timer.Timer.__iter__`` yields a tuple of ``(wait_seconds, entry)``, not ``(entry, wait_seconds)``.
Should I provide pr to fix such small problem?
As a newcomer, I don't know if this is appropriate?
</issue>
<code>
[start of kombu/async/timer.py]
1 # -*- coding: utf-8 -*-
2 """Timer scheduling Python callbacks."""
3 from __future__ import absolute_import, unicode_literals
4
5 import heapq
6 import sys
7
8 from collections import namedtuple
9 from datetime import datetime
10 from functools import total_ordering
11 from weakref import proxy as weakrefproxy
12
13 from vine.utils import wraps
14
15 from kombu.five import monotonic, python_2_unicode_compatible
16 from kombu.log import get_logger
17 from time import time as _time
18
19 try:
20 from pytz import utc
21 except ImportError: # pragma: no cover
22 utc = None
23
24 __all__ = ['Entry', 'Timer', 'to_timestamp']
25
26 logger = get_logger(__name__)
27
28 DEFAULT_MAX_INTERVAL = 2
29 EPOCH = datetime.utcfromtimestamp(0).replace(tzinfo=utc)
30 IS_PYPY = hasattr(sys, 'pypy_version_info')
31
32 scheduled = namedtuple('scheduled', ('eta', 'priority', 'entry'))
33
34
35 def to_timestamp(d, default_timezone=utc, time=monotonic):
36 """Convert datetime to timestamp.
37
38 If d' is already a timestamp, then that will be used.
39 """
40 if isinstance(d, datetime):
41 if d.tzinfo is None:
42 d = d.replace(tzinfo=default_timezone)
43 diff = _time() - time()
44 return max((d - EPOCH).total_seconds() - diff, 0)
45 return d
46
47
48 @total_ordering
49 @python_2_unicode_compatible
50 class Entry(object):
51 """Schedule Entry."""
52
53 if not IS_PYPY: # pragma: no cover
54 __slots__ = (
55 'fun', 'args', 'kwargs', 'tref', 'canceled',
56 '_last_run', '__weakref__',
57 )
58
59 def __init__(self, fun, args=None, kwargs=None):
60 self.fun = fun
61 self.args = args or []
62 self.kwargs = kwargs or {}
63 self.tref = weakrefproxy(self)
64 self._last_run = None
65 self.canceled = False
66
67 def __call__(self):
68 return self.fun(*self.args, **self.kwargs)
69
70 def cancel(self):
71 try:
72 self.tref.canceled = True
73 except ReferenceError: # pragma: no cover
74 pass
75
76 def __repr__(self):
77 return '<TimerEntry: {0}(*{1!r}, **{2!r})'.format(
78 self.fun.__name__, self.args, self.kwargs)
79
80 # must not use hash() to order entries
81 def __lt__(self, other):
82 return id(self) < id(other)
83
84 @property
85 def cancelled(self):
86 return self.canceled
87
88 @cancelled.setter
89 def cancelled(self, value):
90 self.canceled = value
91
92
93 class Timer(object):
94 """Async timer implementation."""
95
96 Entry = Entry
97
98 on_error = None
99
100 def __init__(self, max_interval=None, on_error=None, **kwargs):
101 self.max_interval = float(max_interval or DEFAULT_MAX_INTERVAL)
102 self.on_error = on_error or self.on_error
103 self._queue = []
104
105 def __enter__(self):
106 return self
107
108 def __exit__(self, *exc_info):
109 self.stop()
110
111 def call_at(self, eta, fun, args=(), kwargs={}, priority=0):
112 return self.enter_at(self.Entry(fun, args, kwargs), eta, priority)
113
114 def call_after(self, secs, fun, args=(), kwargs={}, priority=0):
115 return self.enter_after(secs, self.Entry(fun, args, kwargs), priority)
116
117 def call_repeatedly(self, secs, fun, args=(), kwargs={}, priority=0):
118 tref = self.Entry(fun, args, kwargs)
119
120 @wraps(fun)
121 def _reschedules(*args, **kwargs):
122 last, now = tref._last_run, monotonic()
123 lsince = (now - tref._last_run) if last else secs
124 try:
125 if lsince and lsince >= secs:
126 tref._last_run = now
127 return fun(*args, **kwargs)
128 finally:
129 if not tref.canceled:
130 last = tref._last_run
131 next = secs - (now - last) if last else secs
132 self.enter_after(next, tref, priority)
133
134 tref.fun = _reschedules
135 tref._last_run = None
136 return self.enter_after(secs, tref, priority)
137
138 def enter_at(self, entry, eta=None, priority=0, time=monotonic):
139 """Enter function into the scheduler.
140
141 Arguments:
142 entry (~kombu.async.timer.Entry): Item to enter.
143 eta (datetime.datetime): Scheduled time.
144 priority (int): Unused.
145 """
146 if eta is None:
147 eta = time()
148 if isinstance(eta, datetime):
149 try:
150 eta = to_timestamp(eta)
151 except Exception as exc:
152 if not self.handle_error(exc):
153 raise
154 return
155 return self._enter(eta, priority, entry)
156
157 def enter_after(self, secs, entry, priority=0, time=monotonic):
158 return self.enter_at(entry, time() + secs, priority)
159
160 def _enter(self, eta, priority, entry, push=heapq.heappush):
161 push(self._queue, scheduled(eta, priority, entry))
162 return entry
163
164 def apply_entry(self, entry):
165 try:
166 entry()
167 except Exception as exc:
168 if not self.handle_error(exc):
169 logger.error('Error in timer: %r', exc, exc_info=True)
170
171 def handle_error(self, exc_info):
172 if self.on_error:
173 self.on_error(exc_info)
174 return True
175
176 def stop(self):
177 pass
178
179 def __iter__(self, min=min, nowfun=monotonic,
180 pop=heapq.heappop, push=heapq.heappush):
181 """Iterate over schedule.
182
183 This iterator yields a tuple of ``(entry, wait_seconds)``,
184 where if entry is :const:`None` the caller should wait
185 for ``wait_seconds`` until it polls the schedule again.
186 """
187 max_interval = self.max_interval
188 queue = self._queue
189
190 while 1:
191 if queue:
192 eventA = queue[0]
193 now, eta = nowfun(), eventA[0]
194
195 if now < eta:
196 yield min(eta - now, max_interval), None
197 else:
198 eventB = pop(queue)
199
200 if eventB is eventA:
201 entry = eventA[2]
202 if not entry.canceled:
203 yield None, entry
204 continue
205 else:
206 push(queue, eventB)
207 else:
208 yield None, None
209
210 def clear(self):
211 self._queue[:] = [] # atomic, without creating a new list.
212
213 def cancel(self, tref):
214 tref.cancel()
215
216 def __len__(self):
217 return len(self._queue)
218
219 def __nonzero__(self):
220 return True
221
222 @property
223 def queue(self, _pop=heapq.heappop):
224 """Snapshot of underlying datastructure."""
225 events = list(self._queue)
226 return [_pop(v) for v in [events] * len(events)]
227
228 @property
229 def schedule(self):
230 return self
231
[end of kombu/async/timer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kombu/async/timer.py b/kombu/async/timer.py
--- a/kombu/async/timer.py
+++ b/kombu/async/timer.py
@@ -180,7 +180,7 @@
pop=heapq.heappop, push=heapq.heappush):
"""Iterate over schedule.
- This iterator yields a tuple of ``(entry, wait_seconds)``,
+ This iterator yields a tuple of ``(wait_seconds, entry)``,
where if entry is :const:`None` the caller should wait
for ``wait_seconds`` until it polls the schedule again.
"""
|
{"golden_diff": "diff --git a/kombu/async/timer.py b/kombu/async/timer.py\n--- a/kombu/async/timer.py\n+++ b/kombu/async/timer.py\n@@ -180,7 +180,7 @@\n pop=heapq.heappop, push=heapq.heappush):\n \"\"\"Iterate over schedule.\n \n- This iterator yields a tuple of ``(entry, wait_seconds)``,\n+ This iterator yields a tuple of ``(wait_seconds, entry)``,\n where if entry is :const:`None` the caller should wait\n for ``wait_seconds`` until it polls the schedule again.\n \"\"\"\n", "issue": "Timer.__iter__'s docstring is error.\n``kombu.async.timer.Timer.__iter__`` yields a tuple of ``(wait_seconds, entry)``, not ``(entry, wait_seconds)``.\r\n\r\nShould I provide pr to fix such small problem?\r\nAs a newcomer, I don't know if this is appropriate?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Timer scheduling Python callbacks.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport heapq\nimport sys\n\nfrom collections import namedtuple\nfrom datetime import datetime\nfrom functools import total_ordering\nfrom weakref import proxy as weakrefproxy\n\nfrom vine.utils import wraps\n\nfrom kombu.five import monotonic, python_2_unicode_compatible\nfrom kombu.log import get_logger\nfrom time import time as _time\n\ntry:\n from pytz import utc\nexcept ImportError: # pragma: no cover\n utc = None\n\n__all__ = ['Entry', 'Timer', 'to_timestamp']\n\nlogger = get_logger(__name__)\n\nDEFAULT_MAX_INTERVAL = 2\nEPOCH = datetime.utcfromtimestamp(0).replace(tzinfo=utc)\nIS_PYPY = hasattr(sys, 'pypy_version_info')\n\nscheduled = namedtuple('scheduled', ('eta', 'priority', 'entry'))\n\n\ndef to_timestamp(d, default_timezone=utc, time=monotonic):\n \"\"\"Convert datetime to timestamp.\n\n If d' is already a timestamp, then that will be used.\n \"\"\"\n if isinstance(d, datetime):\n if d.tzinfo is None:\n d = d.replace(tzinfo=default_timezone)\n diff = _time() - time()\n return max((d - EPOCH).total_seconds() - diff, 0)\n return d\n\n\n@total_ordering\n@python_2_unicode_compatible\nclass Entry(object):\n \"\"\"Schedule Entry.\"\"\"\n\n if not IS_PYPY: # pragma: no cover\n __slots__ = (\n 'fun', 'args', 'kwargs', 'tref', 'canceled',\n '_last_run', '__weakref__',\n )\n\n def __init__(self, fun, args=None, kwargs=None):\n self.fun = fun\n self.args = args or []\n self.kwargs = kwargs or {}\n self.tref = weakrefproxy(self)\n self._last_run = None\n self.canceled = False\n\n def __call__(self):\n return self.fun(*self.args, **self.kwargs)\n\n def cancel(self):\n try:\n self.tref.canceled = True\n except ReferenceError: # pragma: no cover\n pass\n\n def __repr__(self):\n return '<TimerEntry: {0}(*{1!r}, **{2!r})'.format(\n self.fun.__name__, self.args, self.kwargs)\n\n # must not use hash() to order entries\n def __lt__(self, other):\n return id(self) < id(other)\n\n @property\n def cancelled(self):\n return self.canceled\n\n @cancelled.setter\n def cancelled(self, value):\n self.canceled = value\n\n\nclass Timer(object):\n \"\"\"Async timer implementation.\"\"\"\n\n Entry = Entry\n\n on_error = None\n\n def __init__(self, max_interval=None, on_error=None, **kwargs):\n self.max_interval = float(max_interval or DEFAULT_MAX_INTERVAL)\n self.on_error = on_error or self.on_error\n self._queue = []\n\n def __enter__(self):\n return self\n\n def __exit__(self, *exc_info):\n self.stop()\n\n def call_at(self, eta, fun, args=(), kwargs={}, priority=0):\n return self.enter_at(self.Entry(fun, args, kwargs), eta, priority)\n\n def call_after(self, secs, fun, args=(), kwargs={}, priority=0):\n return 
self.enter_after(secs, self.Entry(fun, args, kwargs), priority)\n\n def call_repeatedly(self, secs, fun, args=(), kwargs={}, priority=0):\n tref = self.Entry(fun, args, kwargs)\n\n @wraps(fun)\n def _reschedules(*args, **kwargs):\n last, now = tref._last_run, monotonic()\n lsince = (now - tref._last_run) if last else secs\n try:\n if lsince and lsince >= secs:\n tref._last_run = now\n return fun(*args, **kwargs)\n finally:\n if not tref.canceled:\n last = tref._last_run\n next = secs - (now - last) if last else secs\n self.enter_after(next, tref, priority)\n\n tref.fun = _reschedules\n tref._last_run = None\n return self.enter_after(secs, tref, priority)\n\n def enter_at(self, entry, eta=None, priority=0, time=monotonic):\n \"\"\"Enter function into the scheduler.\n\n Arguments:\n entry (~kombu.async.timer.Entry): Item to enter.\n eta (datetime.datetime): Scheduled time.\n priority (int): Unused.\n \"\"\"\n if eta is None:\n eta = time()\n if isinstance(eta, datetime):\n try:\n eta = to_timestamp(eta)\n except Exception as exc:\n if not self.handle_error(exc):\n raise\n return\n return self._enter(eta, priority, entry)\n\n def enter_after(self, secs, entry, priority=0, time=monotonic):\n return self.enter_at(entry, time() + secs, priority)\n\n def _enter(self, eta, priority, entry, push=heapq.heappush):\n push(self._queue, scheduled(eta, priority, entry))\n return entry\n\n def apply_entry(self, entry):\n try:\n entry()\n except Exception as exc:\n if not self.handle_error(exc):\n logger.error('Error in timer: %r', exc, exc_info=True)\n\n def handle_error(self, exc_info):\n if self.on_error:\n self.on_error(exc_info)\n return True\n\n def stop(self):\n pass\n\n def __iter__(self, min=min, nowfun=monotonic,\n pop=heapq.heappop, push=heapq.heappush):\n \"\"\"Iterate over schedule.\n\n This iterator yields a tuple of ``(entry, wait_seconds)``,\n where if entry is :const:`None` the caller should wait\n for ``wait_seconds`` until it polls the schedule again.\n \"\"\"\n max_interval = self.max_interval\n queue = self._queue\n\n while 1:\n if queue:\n eventA = queue[0]\n now, eta = nowfun(), eventA[0]\n\n if now < eta:\n yield min(eta - now, max_interval), None\n else:\n eventB = pop(queue)\n\n if eventB is eventA:\n entry = eventA[2]\n if not entry.canceled:\n yield None, entry\n continue\n else:\n push(queue, eventB)\n else:\n yield None, None\n\n def clear(self):\n self._queue[:] = [] # atomic, without creating a new list.\n\n def cancel(self, tref):\n tref.cancel()\n\n def __len__(self):\n return len(self._queue)\n\n def __nonzero__(self):\n return True\n\n @property\n def queue(self, _pop=heapq.heappop):\n \"\"\"Snapshot of underlying datastructure.\"\"\"\n events = list(self._queue)\n return [_pop(v) for v in [events] * len(events)]\n\n @property\n def schedule(self):\n return self\n", "path": "kombu/async/timer.py"}]}
| 2,793 | 143 |
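The kombu record above turns on the order of the tuple yielded by `Timer.__iter__`: the quoted implementation yields `(wait_seconds, None)` while the next entry's ETA is still in the future, `(None, entry)` when an entry is due, and `(None, None)` when the schedule is empty, which is exactly what the golden diff makes the docstring say. A small sketch of a polling loop written against that order follows; the `run_schedule` helper is illustrative only and assumes a `Timer` instance (and, optionally, a sleep function) supplied by the caller.

```python
import time


def run_schedule(timer, sleep=time.sleep):
    """Drain a kombu Timer, honouring the (wait_seconds, entry) tuple order."""
    for wait_seconds, entry in timer:
        if entry is not None:
            timer.apply_entry(entry)      # an entry is due: execute it now
        elif wait_seconds is not None:
            sleep(wait_seconds)           # nothing due yet: wait the hinted delay
        else:
            break                         # (None, None): the schedule is empty
```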
gh_patches_debug_59375
|
rasdani/github-patches
|
git_diff
|
cocotb__cocotb-797
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use sphinxcontrib-trio to document coroutines
It looks like https://sphinxcontrib-trio.readthedocs.io/ could help autodoc'ing our coroutines nicer.
</issue>
<code>
[start of documentation/source/conf.py]
1 # -*- coding: utf-8 -*-
2 #
3 # cocotb documentation build configuration file, created by
4 # sphinx-quickstart on Wed Jun 19 14:44:09 2013.
5 #
6 # This file is execfile()d with the current directory set to its containing dir.
7 #
8 # Note that not all possible configuration values are present in this
9 # autogenerated file.
10 #
11 # All configuration values have a default; values that are commented out
12 # serve to show the default.
13
14 import sys, os
15 import datetime
16
17 # If extensions (or modules to document with autodoc) are in another directory,
18 # add these directories to sys.path here. If the directory is relative to the
19 # documentation root, use os.path.abspath to make it absolute, like shown here.
20 sys.path.insert(0, os.path.abspath('../..'))
21
22 # Add in-tree extensions to path
23 sys.path.insert(0, os.path.abspath('../sphinxext'))
24
25 os.environ["SPHINX_BUILD"] = "1"
26
27 # -- General configuration -----------------------------------------------------
28
29 # If your documentation needs a minimal Sphinx version, state it here.
30 #needs_sphinx = '1.0'
31
32 # Add any Sphinx extension module names here, as strings. They can be extensions
33 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
34 extensions = [
35 'sphinx.ext.autodoc',
36 'sphinx.ext.doctest',
37 'sphinx.ext.todo',
38 'sphinx.ext.coverage',
39 'sphinx.ext.imgmath',
40 'sphinx.ext.viewcode',
41 'sphinx.ext.napoleon',
42 'sphinx.ext.intersphinx',
43 'cairosvgconverter',
44 ]
45
46 intersphinx_mapping = {'https://docs.python.org/3': None}
47
48 # Add any paths that contain templates here, relative to this directory.
49 templates_path = ['_templates']
50
51 # The suffix of source filenames.
52 source_suffix = '.rst'
53
54 # The encoding of source files.
55 #source_encoding = 'utf-8-sig'
56
57 # The master toctree document.
58 master_doc = 'index'
59
60 # General information about the project.
61 project = u'cocotb'
62 copyright = u'2014-{0}, PotentialVentures'.format(datetime.datetime.now().year)
63
64 # The version info for the project you're documenting, acts as replacement for
65 # |version| and |release|, also used in various other places throughout the
66 # built documents.
67 #
68 # The short X.Y version.
69 version = '1.1'
70 # The full version, including alpha/beta/rc tags.
71 release = '1.1'
72
73 # The language for content autogenerated by Sphinx. Refer to documentation
74 # for a list of supported languages.
75 #language = None
76
77 # There are two options for replacing |today|: either, you set today to some
78 # non-false value, then it is used:
79 #today = ''
80 # Else, today_fmt is used as the format for a strftime call.
81 #today_fmt = '%B %d, %Y'
82
83 # List of patterns, relative to source directory, that match files and
84 # directories to ignore when looking for source files.
85 exclude_patterns = []
86
87 # The reST default role (used for this markup: `text`) to use for all documents.
88 #default_role = None
89
90 # If true, '()' will be appended to :func: etc. cross-reference text.
91 #add_function_parentheses = True
92
93 # If true, the current module name will be prepended to all description
94 # unit titles (such as .. function::).
95 #add_module_names = True
96
97 # If true, sectionauthor and moduleauthor directives will be shown in the
98 # output. They are ignored by default.
99 #show_authors = False
100
101 # The name of the Pygments (syntax highlighting) style to use.
102 pygments_style = 'sphinx'
103
104 # A list of ignored prefixes for module index sorting.
105 #modindex_common_prefix = []
106
107 # If true, keep warnings as "system message" paragraphs in the built documents.
108 #keep_warnings = False
109
110
111 # -- Options for HTML output ---------------------------------------------------
112
113 # The theme to use for HTML and HTML Help pages. See the documentation for
114 # a list of builtin themes.
115 html_theme = 'default'
116
117 # Theme options are theme-specific and customize the look and feel of a theme
118 # further. For a list of options available for each theme, see the
119 # documentation.
120 #html_theme_options = {}
121
122 # Add any paths that contain custom themes here, relative to this directory.
123 #html_theme_path = []
124
125 # The name for this set of Sphinx documents. If None, it defaults to
126 # "<project> v<release> documentation".
127 #html_title = None
128
129 # A shorter title for the navigation bar. Default is the same as html_title.
130 #html_short_title = None
131
132 # The name of an image file (relative to this directory) to place at the top
133 # of the sidebar.
134 #html_logo = None
135
136 # The name of an image file (within the static path) to use as favicon of the
137 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
138 # pixels large.
139 #html_favicon = None
140
141 # Add any paths that contain custom static files (such as style sheets) here,
142 # relative to this directory. They are copied after the builtin static files,
143 # so a file named "default.css" will overwrite the builtin "default.css".
144 html_static_path = ['_static']
145
146 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
147 # using the given strftime format.
148 #html_last_updated_fmt = '%b %d, %Y'
149
150 # If true, SmartyPants will be used to convert quotes and dashes to
151 # typographically correct entities.
152 #html_use_smartypants = True
153
154 # Custom sidebar templates, maps document names to template names.
155 #html_sidebars = {}
156
157 # Additional templates that should be rendered to pages, maps page names to
158 # template names.
159 #html_additional_pages = {}
160
161 # If false, no module index is generated.
162 #html_domain_indices = True
163
164 # If false, no index is generated.
165 #html_use_index = True
166
167 # If true, the index is split into individual pages for each letter.
168 #html_split_index = False
169
170 # If true, links to the reST sources are added to the pages.
171 #html_show_sourcelink = True
172
173 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
174 #html_show_sphinx = True
175
176 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
177 #html_show_copyright = True
178
179 # If true, an OpenSearch description file will be output, and all pages will
180 # contain a <link> tag referring to it. The value of this option must be the
181 # base URL from which the finished HTML is served.
182 #html_use_opensearch = ''
183
184 # This is the file name suffix for HTML files (e.g. ".xhtml").
185 #html_file_suffix = None
186
187 # Output file base name for HTML help builder.
188 htmlhelp_basename = 'cocotbdoc'
189
190
191 # -- Options for LaTeX output --------------------------------------------------
192
193 latex_elements = {
194 # The paper size ('letterpaper' or 'a4paper').
195 #'papersize': 'letterpaper',
196
197 # The font size ('10pt', '11pt' or '12pt').
198 #'pointsize': '10pt',
199
200 # Additional stuff for the LaTeX preamble.
201 #'preamble': '',
202 }
203
204 # Grouping the document tree into LaTeX files. List of tuples
205 # (source start file, target name, title, author, documentclass [howto/manual]).
206 latex_documents = [
207 ('index', 'cocotb.tex', u'cocotb Documentation',
208 u'PotentialVentures', 'manual'),
209 ]
210
211 # The name of an image file (relative to this directory) to place at the top of
212 # the title page.
213 #latex_logo = None
214
215 # For "manual" documents, if this is true, then toplevel headings are parts,
216 # not chapters.
217 #latex_use_parts = False
218
219 # If true, show page references after internal links.
220 #latex_show_pagerefs = False
221
222 # If true, show URL addresses after external links.
223 #latex_show_urls = False
224
225 # Documents to append as an appendix to all manuals.
226 #latex_appendices = []
227
228 # If false, no module index is generated.
229 #latex_domain_indices = True
230
231
232 # -- Options for manual page output --------------------------------------------
233
234 # One entry per manual page. List of tuples
235 # (source start file, name, description, authors, manual section).
236 man_pages = [
237 ('index', 'cocotb', u'cocotb Documentation',
238 [u'PotentialVentures'], 1)
239 ]
240
241 # If true, show URL addresses after external links.
242 #man_show_urls = False
243
244
245 # -- Options for Texinfo output ------------------------------------------------
246
247 # Grouping the document tree into Texinfo files. List of tuples
248 # (source start file, target name, title, author,
249 # dir menu entry, description, category)
250 texinfo_documents = [
251 ('index', 'cocotb', u'cocotb Documentation',
252 u'PotentialVentures', 'cocotb', 'Coroutine Cosimulation TestBench \
253 environment for efficient verification of RTL using Python.',
254 'Miscellaneous'),
255 ]
256
257 # Documents to append as an appendix to all manuals.
258 #texinfo_appendices = []
259
260 # If false, no module index is generated.
261 #texinfo_domain_indices = True
262
263 # How to display URL addresses: 'footnote', 'no', or 'inline'.
264 #texinfo_show_urls = 'footnote'
265
266 # If true, do not generate a @detailmenu in the "Top" node's menu.
267 #texinfo_no_detailmenu = False
268
269 # For now show the todoy
270 todo_include_todos = True
271
[end of documentation/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/documentation/source/conf.py b/documentation/source/conf.py
--- a/documentation/source/conf.py
+++ b/documentation/source/conf.py
@@ -41,6 +41,7 @@
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'cairosvgconverter',
+ 'sphinxcontrib_trio',
]
intersphinx_mapping = {'https://docs.python.org/3': None}
|
{"golden_diff": "diff --git a/documentation/source/conf.py b/documentation/source/conf.py\n--- a/documentation/source/conf.py\n+++ b/documentation/source/conf.py\n@@ -41,6 +41,7 @@\n 'sphinx.ext.napoleon',\n 'sphinx.ext.intersphinx',\n 'cairosvgconverter',\n+ 'sphinxcontrib_trio',\n ]\n \n intersphinx_mapping = {'https://docs.python.org/3': None}\n", "issue": "Use sphinxcontrib-trio to document coroutines\nIt looks like https://sphinxcontrib-trio.readthedocs.io/ could help autodoc'ing our coroutines nicer. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# cocotb documentation build configuration file, created by\n# sphinx-quickstart on Wed Jun 19 14:44:09 2013.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys, os\nimport datetime\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('../..'))\n\n# Add in-tree extensions to path\nsys.path.insert(0, os.path.abspath('../sphinxext'))\n\nos.environ[\"SPHINX_BUILD\"] = \"1\"\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'sphinx.ext.autodoc', \n 'sphinx.ext.doctest', \n 'sphinx.ext.todo', \n 'sphinx.ext.coverage', \n 'sphinx.ext.imgmath', \n 'sphinx.ext.viewcode',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.intersphinx',\n 'cairosvgconverter',\n ]\n\nintersphinx_mapping = {'https://docs.python.org/3': None}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'cocotb'\ncopyright = u'2014-{0}, PotentialVentures'.format(datetime.datetime.now().year)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '1.1'\n# The full version, including alpha/beta/rc tags.\nrelease = '1.1'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. 
cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'default'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. 
\".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'cocotbdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'cocotb.tex', u'cocotb Documentation',\n u'PotentialVentures', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'cocotb', u'cocotb Documentation',\n [u'PotentialVentures'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'cocotb', u'cocotb Documentation',\n u'PotentialVentures', 'cocotb', 'Coroutine Cosimulation TestBench \\\n environment for efficient verification of RTL using Python.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n# For now show the todoy \ntodo_include_todos = True\n", "path": "documentation/source/conf.py"}]}
| 3,434 | 91 |
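The cocotb record's golden diff is a one-line configuration change: adding `sphinxcontrib_trio` to the Sphinx extensions in `documentation/source/conf.py` so autodoc renders coroutines with their async nature visible. The snippet below sketches the patched state of the extensions list only; the extension usually ships on PyPI as `sphinxcontrib-trio`, which would also need to be added to the documentation build requirements.

```python
# documentation/source/conf.py after the golden diff (extensions list only)
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.imgmath',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
    'sphinx.ext.intersphinx',
    'cairosvgconverter',
    'sphinxcontrib_trio',   # added: lets autodoc mark coroutines as such
]
```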
gh_patches_debug_9770
|
rasdani/github-patches
|
git_diff
|
spyder-ide__spyder-14543
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DeprecationWarning: implicit conversion to integers in spyder/widgets/colors.py:78
When running the test suite, I received the following warning:
```python
<<<PACKAGEDIR>>>/spyder/widgets/colors.py:78: DeprecationWarning: an integer is required (got type float). Implicit conversion to integers using __int__ is deprecated, and may be removed in a future version of Python.
self.lineedit.setMinimumWidth(fm.width(color.name()) * 1.2)
```
Changing this to say
```python
self.lineedit.setMinimumWidth(int(fm.width(color.name()) * 1.2))
```
should be sufficient to fix this.
</issue>
<code>
[start of spyder/widgets/colors.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © Spyder Project Contributors
4 # Licensed under the terms of the MIT License
5 # (see spyder/__init__.py for details)
6
7 # Third party imports
8 from qtpy.QtCore import Property, QSize, Signal, Slot
9 from qtpy.QtGui import QColor, QIcon, QPixmap
10 from qtpy.QtWidgets import QColorDialog, QHBoxLayout, QLineEdit, QToolButton
11
12 # Local imports
13 from spyder.py3compat import is_text_string
14
15
16 class ColorButton(QToolButton):
17 """
18 Color choosing push button
19 """
20 colorChanged = Signal(QColor)
21
22 def __init__(self, parent=None):
23 QToolButton.__init__(self, parent)
24 self.setFixedSize(20, 20)
25 self.setIconSize(QSize(12, 12))
26 self.clicked.connect(self.choose_color)
27 self._color = QColor()
28
29 def choose_color(self):
30 color = QColorDialog.getColor(self._color, self.parentWidget(),
31 'Select Color',
32 QColorDialog.ShowAlphaChannel)
33 if color.isValid():
34 self.set_color(color)
35
36 def get_color(self):
37 return self._color
38
39 @Slot(QColor)
40 def set_color(self, color):
41 if color != self._color:
42 self._color = color
43 self.colorChanged.emit(self._color)
44 pixmap = QPixmap(self.iconSize())
45 pixmap.fill(color)
46 self.setIcon(QIcon(pixmap))
47
48 color = Property("QColor", get_color, set_color)
49
50
51 def text_to_qcolor(text):
52 """
53 Create a QColor from specified string
54 Avoid warning from Qt when an invalid QColor is instantiated
55 """
56 color = QColor()
57 text = str(text)
58 if not is_text_string(text):
59 return color
60 if text.startswith('#') and len(text)==7:
61 correct = '#0123456789abcdef'
62 for char in text:
63 if char.lower() not in correct:
64 return color
65 elif text not in list(QColor.colorNames()):
66 return color
67 color.setNamedColor(text)
68 return color
69
70
71 class ColorLayout(QHBoxLayout):
72 """Color-specialized QLineEdit layout"""
73 def __init__(self, color, parent=None):
74 QHBoxLayout.__init__(self)
75 assert isinstance(color, QColor)
76 self.lineedit = QLineEdit(color.name(), parent)
77 fm = self.lineedit.fontMetrics()
78 self.lineedit.setMinimumWidth(fm.width(color.name()) * 1.2)
79 self.lineedit.textChanged.connect(self.update_color)
80 self.addWidget(self.lineedit)
81 self.colorbtn = ColorButton(parent)
82 self.colorbtn.color = color
83 self.colorbtn.colorChanged.connect(self.update_text)
84 self.addWidget(self.colorbtn)
85
86 def update_color(self, text):
87 color = text_to_qcolor(text)
88 if color.isValid():
89 self.colorbtn.color = color
90
91 def update_text(self, color):
92 self.lineedit.setText(color.name())
93
94 def text(self):
95 return self.lineedit.text()
96
[end of spyder/widgets/colors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/spyder/widgets/colors.py b/spyder/widgets/colors.py
--- a/spyder/widgets/colors.py
+++ b/spyder/widgets/colors.py
@@ -75,7 +75,7 @@
assert isinstance(color, QColor)
self.lineedit = QLineEdit(color.name(), parent)
fm = self.lineedit.fontMetrics()
- self.lineedit.setMinimumWidth(fm.width(color.name()) * 1.2)
+ self.lineedit.setMinimumWidth(int(fm.width(color.name()) * 1.2))
self.lineedit.textChanged.connect(self.update_color)
self.addWidget(self.lineedit)
self.colorbtn = ColorButton(parent)
|
{"golden_diff": "diff --git a/spyder/widgets/colors.py b/spyder/widgets/colors.py\n--- a/spyder/widgets/colors.py\n+++ b/spyder/widgets/colors.py\n@@ -75,7 +75,7 @@\n assert isinstance(color, QColor)\r\n self.lineedit = QLineEdit(color.name(), parent)\r\n fm = self.lineedit.fontMetrics()\r\n- self.lineedit.setMinimumWidth(fm.width(color.name()) * 1.2)\r\n+ self.lineedit.setMinimumWidth(int(fm.width(color.name()) * 1.2))\r\n self.lineedit.textChanged.connect(self.update_color)\r\n self.addWidget(self.lineedit)\r\n self.colorbtn = ColorButton(parent)\n", "issue": "DeprecationWarning: implicit conversion to integers in spyder/widgets/colors.py:78\nWhen running the test suite, I received the following warning:\r\n\r\n```python\r\n <<<PACKAGEDIR>>>/spyder/widgets/colors.py:78: DeprecationWarning: an integer is required (got type float). Implicit conversion to integers using __int__ is deprecated, and may be removed in a future version of Python.\r\n self.lineedit.setMinimumWidth(fm.width(color.name()) * 1.2)\r\n```\r\n\r\nChanging this to say\r\n```python\r\n self.lineedit.setMinimumWidth(int(fm.width(color.name()) * 1.2))\r\n```\r\nshould be sufficient to fix this.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\r\n#\r\n# Copyright \u00a9 Spyder Project Contributors\r\n# Licensed under the terms of the MIT License\r\n# (see spyder/__init__.py for details)\r\n\r\n# Third party imports\r\nfrom qtpy.QtCore import Property, QSize, Signal, Slot\r\nfrom qtpy.QtGui import QColor, QIcon, QPixmap\r\nfrom qtpy.QtWidgets import QColorDialog, QHBoxLayout, QLineEdit, QToolButton\r\n\r\n# Local imports\r\nfrom spyder.py3compat import is_text_string\r\n\r\n\r\nclass ColorButton(QToolButton):\r\n \"\"\"\r\n Color choosing push button\r\n \"\"\"\r\n colorChanged = Signal(QColor)\r\n\r\n def __init__(self, parent=None):\r\n QToolButton.__init__(self, parent)\r\n self.setFixedSize(20, 20)\r\n self.setIconSize(QSize(12, 12))\r\n self.clicked.connect(self.choose_color)\r\n self._color = QColor()\r\n\r\n def choose_color(self):\r\n color = QColorDialog.getColor(self._color, self.parentWidget(),\r\n 'Select Color',\r\n QColorDialog.ShowAlphaChannel)\r\n if color.isValid():\r\n self.set_color(color)\r\n\r\n def get_color(self):\r\n return self._color\r\n\r\n @Slot(QColor)\r\n def set_color(self, color):\r\n if color != self._color:\r\n self._color = color\r\n self.colorChanged.emit(self._color)\r\n pixmap = QPixmap(self.iconSize())\r\n pixmap.fill(color)\r\n self.setIcon(QIcon(pixmap))\r\n\r\n color = Property(\"QColor\", get_color, set_color)\r\n\r\n\r\ndef text_to_qcolor(text):\r\n \"\"\"\r\n Create a QColor from specified string\r\n Avoid warning from Qt when an invalid QColor is instantiated\r\n \"\"\"\r\n color = QColor()\r\n text = str(text)\r\n if not is_text_string(text):\r\n return color\r\n if text.startswith('#') and len(text)==7:\r\n correct = '#0123456789abcdef'\r\n for char in text:\r\n if char.lower() not in correct:\r\n return color\r\n elif text not in list(QColor.colorNames()):\r\n return color\r\n color.setNamedColor(text)\r\n return color\r\n\r\n\r\nclass ColorLayout(QHBoxLayout):\r\n \"\"\"Color-specialized QLineEdit layout\"\"\"\r\n def __init__(self, color, parent=None):\r\n QHBoxLayout.__init__(self)\r\n assert isinstance(color, QColor)\r\n self.lineedit = QLineEdit(color.name(), parent)\r\n fm = self.lineedit.fontMetrics()\r\n self.lineedit.setMinimumWidth(fm.width(color.name()) * 1.2)\r\n self.lineedit.textChanged.connect(self.update_color)\r\n self.addWidget(self.lineedit)\r\n 
self.colorbtn = ColorButton(parent)\r\n self.colorbtn.color = color\r\n self.colorbtn.colorChanged.connect(self.update_text)\r\n self.addWidget(self.colorbtn)\r\n\r\n def update_color(self, text):\r\n color = text_to_qcolor(text)\r\n if color.isValid():\r\n self.colorbtn.color = color\r\n\r\n def update_text(self, color):\r\n self.lineedit.setText(color.name())\r\n\r\n def text(self):\r\n return self.lineedit.text()\r\n", "path": "spyder/widgets/colors.py"}]}
| 1,510 | 139 |
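The spyder record is about an implicit float-to-int conversion: `fm.width(color.name()) * 1.2` is a float, and passing it to the int-typed Qt setter `setMinimumWidth` triggers the DeprecationWarning quoted in the issue, so the golden diff wraps the product in `int()`. The runnable stand-in below shows the same pattern without needing a Qt application; `set_minimum_width` merely mimics an int-only Qt setter and is not spyder code.

```python
def set_minimum_width(width):
    """Stand-in for an int-only Qt setter such as QWidget.setMinimumWidth."""
    if not isinstance(width, int):
        raise TypeError("an integer is required (got type %s)" % type(width).__name__)
    print("minimum width set to %dpx" % width)


text_width = 57              # plays the role of fm.width(color.name())
scaled = text_width * 1.2    # 68.4, a float, as described in the warning

# set_minimum_width(scaled)   # would fail: implicit float-to-int conversion
set_minimum_width(int(scaled))  # the golden diff's fix: convert explicitly (68)
```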
gh_patches_debug_34015
|
rasdani/github-patches
|
git_diff
|
facebookresearch__hydra-2277
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] distutils deprecation warning makes test_completion.py fail on Python 3.10
# 🐛 Bug
## Description
<!-- A clear and concise description of what the bug is. -->
distutils is deprecated in Python 3.10 and its import in https://github.com/facebookresearch/hydra/blob/8b9ce30802165c1b24dbf4e513eb78d8ca8cacd6/tests/test_completion.py#L2 therefore emits a warning that makes the pytest run fail.
## Checklist
- [x] I checked on the latest version of Hydra
- [x] I created a minimal repro (See [this](https://stackoverflow.com/help/minimal-reproducible-example) for tips).
## To reproduce
** Minimal Code/Config snippet to reproduce **
- Run pytest with a Python 3.10 interpreter.
See conda build meta.yaml here: https://github.com/conda-forge/hydra-core-feedstock/blob/e62d99bd0afdcca166a731941f230b848015cbbe/recipe/meta.yaml
and associated CI failure here: https://dev.azure.com/conda-forge/feedstock-builds/_build/results?buildId=529074&view=logs&j=656edd35-690f-5c53-9ba3-09c10d0bea97&t=e5c8ab1d-8ff9-5cae-b332-e15ae582ed2d
(ignoring the warning per `-W` flag didn't work here)
**Stack trace/error message**
```
==================================== ERRORS ====================================
__________________ ERROR collecting tests/test_completion.py ___________________
tests/test_completion.py:2: in <module>
import distutils.spawn
../_test_env_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_place/lib/python3.10/distutils/__init__.py:19: in <module>
warnings.warn(_DEPRECATION_MESSAGE,
E DeprecationWarning: The distutils package is deprecated and slated for removal in Python 3.12. Use setuptools or check PEP 632 for potential alternatives
=========================== short test summary info ============================
ERROR tests/test_completion.py - DeprecationWarning: The distutils package is...
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 1.18s ===============================
Tests failed for hydra-core-1.2.0-pyhd8ed1ab_0.tar.bz2 - moving package to /home/conda/feedstock_root/build_artifacts/broken
WARNING:conda_build.build:Tests failed for hydra-core-1.2.0-pyhd8ed1ab_0.tar.bz2 - moving package to /home/conda/feedstock_root/build_artifacts/broken
TESTS FAILED: hydra-core-1.2.0-pyhd8ed1ab_0.tar.bz2
```
## Expected Behavior
<!-- A clear and concise description of what you expected to happen. -->
Tests pass
## System information
- **Hydra Version** : 1.2.0
- **Python version** : 3.10.5
- **Virtual environment type and version** : conda env
- **Operating system** : Linux
## Additional context
~~This currently blocks the conda-forge feedstock update at https://github.com/conda-forge/hydra-core-feedstock/pull/19.
Workaround suggestions for this PR (preferably without file changes) are welcome.~~
Update: Got the tests to pass by adding the `-W ignore::DeprecationWarning` flag to the pytest args. With this workaround I have completed the feedstock update and hydra-core 1.2 should be available from conda-forge in a few minutes.
</issue>
<code>
[start of build_helpers/__init__.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import setuptools # isort:skip # noqa
3 import distutils # isort:skip # noqa
4
[end of build_helpers/__init__.py]
[start of build_helpers/build_helpers.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import codecs
3 import distutils.log
4 import errno
5 import os
6 import re
7 import shutil
8 import subprocess
9 from os.path import abspath, basename, dirname, exists, isdir, join
10 from typing import List, Optional
11
12 from setuptools import Command
13 from setuptools.command import build_py, develop, sdist
14
15
16 def find_version(*file_paths: str) -> str:
17 with codecs.open(os.path.join(*file_paths), "r") as fp:
18 version_file = fp.read()
19 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
20 if version_match:
21 return version_match.group(1)
22 raise RuntimeError("Unable to find version string.")
23
24
25 def matches(patterns: List[str], string: str) -> bool:
26 string = string.replace("\\", "/")
27 for pattern in patterns:
28 if re.match(pattern, string):
29 return True
30 return False
31
32
33 def find_(
34 root: str,
35 rbase: str,
36 include_files: List[str],
37 include_dirs: List[str],
38 excludes: List[str],
39 scan_exclude: List[str],
40 ) -> List[str]:
41 files = []
42 scan_root = os.path.join(root, rbase)
43 with os.scandir(scan_root) as it:
44 for entry in it:
45 path = os.path.join(rbase, entry.name)
46 if matches(scan_exclude, path):
47 continue
48
49 if entry.is_dir():
50 if matches(include_dirs, path):
51 if not matches(excludes, path):
52 files.append(path)
53 else:
54 ret = find_(
55 root=root,
56 rbase=path,
57 include_files=include_files,
58 include_dirs=include_dirs,
59 excludes=excludes,
60 scan_exclude=scan_exclude,
61 )
62 files.extend(ret)
63 else:
64 if matches(include_files, path) and not matches(excludes, path):
65 files.append(path)
66
67 return files
68
69
70 def find(
71 root: str,
72 include_files: List[str],
73 include_dirs: List[str],
74 excludes: List[str],
75 scan_exclude: Optional[List[str]] = None,
76 ) -> List[str]:
77 if scan_exclude is None:
78 scan_exclude = []
79 return find_(
80 root=root,
81 rbase="",
82 include_files=include_files,
83 include_dirs=include_dirs,
84 excludes=excludes,
85 scan_exclude=scan_exclude,
86 )
87
88
89 class CleanCommand(Command): # type: ignore
90 """
91 Our custom command to clean out junk files.
92 """
93
94 description = "Cleans out generated and junk files we don't want in the repo"
95 dry_run: bool
96 user_options: List[str] = []
97
98 def run(self) -> None:
99 files = find(
100 ".",
101 include_files=["^hydra/grammar/gen/.*"],
102 include_dirs=[
103 "\\.egg-info$",
104 "^.pytest_cache$",
105 ".*/__pycache__$",
106 ".*/multirun$",
107 ".*/outputs$",
108 "^build$",
109 ],
110 scan_exclude=["^.git$", "^.nox/.*$", "^website/.*$"],
111 excludes=[".*\\.gitignore$"],
112 )
113
114 if self.dry_run:
115 print("Would clean up the following files and dirs")
116 print("\n".join(files))
117 else:
118 for f in files:
119 if exists(f):
120 if isdir(f):
121 shutil.rmtree(f, ignore_errors=True)
122 else:
123 os.unlink(f)
124
125 def initialize_options(self) -> None:
126 pass
127
128 def finalize_options(self) -> None:
129 pass
130
131
132 def run_antlr(cmd: Command) -> None:
133 try:
134 cmd.announce("Generating parsers with antlr4", level=distutils.log.INFO)
135 cmd.run_command("antlr")
136 except OSError as e:
137 if e.errno == errno.ENOENT:
138 msg = f"| Unable to generate parsers: {e} |"
139 msg = "=" * len(msg) + "\n" + msg + "\n" + "=" * len(msg)
140 cmd.announce(f"{msg}", level=distutils.log.FATAL)
141 exit(1)
142 else:
143 raise
144
145
146 class BuildPyCommand(build_py.build_py):
147 def run(self) -> None:
148 if not self.dry_run:
149 self.run_command("clean")
150 run_antlr(self)
151 build_py.build_py.run(self)
152
153
154 class Develop(develop.develop):
155 def run(self) -> None: # type: ignore
156 if not self.dry_run:
157 run_antlr(self)
158 develop.develop.run(self)
159
160
161 class SDistCommand(sdist.sdist):
162 def run(self) -> None:
163 if not self.dry_run: # type: ignore
164 self.run_command("clean")
165 run_antlr(self)
166 sdist.sdist.run(self)
167
168
169 class ANTLRCommand(Command): # type: ignore
170 """Generate parsers using ANTLR."""
171
172 description = "Run ANTLR"
173 user_options: List[str] = []
174
175 def run(self) -> None:
176 """Run command."""
177 root_dir = abspath(dirname(__file__))
178 project_root = abspath(dirname(basename(__file__)))
179 for grammar in [
180 "hydra/grammar/OverrideLexer.g4",
181 "hydra/grammar/OverrideParser.g4",
182 ]:
183 command = [
184 "java",
185 "-jar",
186 join(root_dir, "bin/antlr-4.9.3-complete.jar"),
187 "-Dlanguage=Python3",
188 "-o",
189 join(project_root, "hydra/grammar/gen/"),
190 "-Xexact-output-dir",
191 "-visitor",
192 join(project_root, grammar),
193 ]
194
195 self.announce(
196 f"Generating parser for Python3: {command}", level=distutils.log.INFO
197 )
198
199 subprocess.check_call(command)
200
201 def initialize_options(self) -> None:
202 pass
203
204 def finalize_options(self) -> None:
205 pass
206
[end of build_helpers/build_helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/build_helpers/__init__.py b/build_helpers/__init__.py
--- a/build_helpers/__init__.py
+++ b/build_helpers/__init__.py
@@ -1,3 +1,2 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import setuptools # isort:skip # noqa
-import distutils # isort:skip # noqa
diff --git a/build_helpers/build_helpers.py b/build_helpers/build_helpers.py
--- a/build_helpers/build_helpers.py
+++ b/build_helpers/build_helpers.py
@@ -1,7 +1,7 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import codecs
-import distutils.log
import errno
+import logging
import os
import re
import shutil
@@ -12,6 +12,8 @@
from setuptools import Command
from setuptools.command import build_py, develop, sdist
+log = logging.getLogger(__name__)
+
def find_version(*file_paths: str) -> str:
with codecs.open(os.path.join(*file_paths), "r") as fp:
@@ -131,13 +133,13 @@
def run_antlr(cmd: Command) -> None:
try:
- cmd.announce("Generating parsers with antlr4", level=distutils.log.INFO)
+ log.info("Generating parsers with antlr4")
cmd.run_command("antlr")
except OSError as e:
if e.errno == errno.ENOENT:
msg = f"| Unable to generate parsers: {e} |"
msg = "=" * len(msg) + "\n" + msg + "\n" + "=" * len(msg)
- cmd.announce(f"{msg}", level=distutils.log.FATAL)
+ log.critical(f"{msg}")
exit(1)
else:
raise
@@ -192,9 +194,7 @@
join(project_root, grammar),
]
- self.announce(
- f"Generating parser for Python3: {command}", level=distutils.log.INFO
- )
+ log.info(f"Generating parser for Python3: {command}")
subprocess.check_call(command)
|
{"golden_diff": "diff --git a/build_helpers/__init__.py b/build_helpers/__init__.py\n--- a/build_helpers/__init__.py\n+++ b/build_helpers/__init__.py\n@@ -1,3 +1,2 @@\n # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n import setuptools # isort:skip # noqa\n-import distutils # isort:skip # noqa\ndiff --git a/build_helpers/build_helpers.py b/build_helpers/build_helpers.py\n--- a/build_helpers/build_helpers.py\n+++ b/build_helpers/build_helpers.py\n@@ -1,7 +1,7 @@\n # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n import codecs\n-import distutils.log\n import errno\n+import logging\n import os\n import re\n import shutil\n@@ -12,6 +12,8 @@\n from setuptools import Command\n from setuptools.command import build_py, develop, sdist\n \n+log = logging.getLogger(__name__)\n+\n \n def find_version(*file_paths: str) -> str:\n with codecs.open(os.path.join(*file_paths), \"r\") as fp:\n@@ -131,13 +133,13 @@\n \n def run_antlr(cmd: Command) -> None:\n try:\n- cmd.announce(\"Generating parsers with antlr4\", level=distutils.log.INFO)\n+ log.info(\"Generating parsers with antlr4\")\n cmd.run_command(\"antlr\")\n except OSError as e:\n if e.errno == errno.ENOENT:\n msg = f\"| Unable to generate parsers: {e} |\"\n msg = \"=\" * len(msg) + \"\\n\" + msg + \"\\n\" + \"=\" * len(msg)\n- cmd.announce(f\"{msg}\", level=distutils.log.FATAL)\n+ log.critical(f\"{msg}\")\n exit(1)\n else:\n raise\n@@ -192,9 +194,7 @@\n join(project_root, grammar),\n ]\n \n- self.announce(\n- f\"Generating parser for Python3: {command}\", level=distutils.log.INFO\n- )\n+ log.info(f\"Generating parser for Python3: {command}\")\n \n subprocess.check_call(command)\n", "issue": "[Bug] distutils deprecation warning makes test_completion.py fail on Python 3.10\n# \ud83d\udc1b Bug\r\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\ndistutils is deprecated in Python 3.10 and its import in https://github.com/facebookresearch/hydra/blob/8b9ce30802165c1b24dbf4e513eb78d8ca8cacd6/tests/test_completion.py#L2 therefore emits a warning that makes the pytest run fail.\r\n\r\n## Checklist\r\n- [x] I checked on the latest version of Hydra\r\n- [x] I created a minimal repro (See [this](https://stackoverflow.com/help/minimal-reproducible-example) for tips).\r\n\r\n## To reproduce\r\n\r\n** Minimal Code/Config snippet to reproduce **\r\n\r\n- Run pytest with a Python 3.10 interpreter.\r\n\r\nSee conda build meta.yaml here: https://github.com/conda-forge/hydra-core-feedstock/blob/e62d99bd0afdcca166a731941f230b848015cbbe/recipe/meta.yaml\r\nand associated CI failure here: https://dev.azure.com/conda-forge/feedstock-builds/_build/results?buildId=529074&view=logs&j=656edd35-690f-5c53-9ba3-09c10d0bea97&t=e5c8ab1d-8ff9-5cae-b332-e15ae582ed2d\r\n\r\n(ignoring the warning per `-W` flag didn't work here)\r\n\r\n**Stack trace/error message**\r\n```\r\n==================================== ERRORS ====================================\r\n__________________ ERROR collecting tests/test_completion.py ___________________\r\ntests/test_completion.py:2: in <module>\r\n import distutils.spawn\r\n../_test_env_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_place/lib/python3.10/distutils/__init__.py:19: in <module>\r\n warnings.warn(_DEPRECATION_MESSAGE,\r\nE DeprecationWarning: The distutils package is deprecated and slated for removal in Python 3.12. 
Use setuptools or check PEP 632 for potential alternatives\r\n=========================== short test summary info ============================\r\nERROR tests/test_completion.py - DeprecationWarning: The distutils package is...\r\n!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!\r\n=============================== 1 error in 1.18s ===============================\r\nTests failed for hydra-core-1.2.0-pyhd8ed1ab_0.tar.bz2 - moving package to /home/conda/feedstock_root/build_artifacts/broken\r\nWARNING:conda_build.build:Tests failed for hydra-core-1.2.0-pyhd8ed1ab_0.tar.bz2 - moving package to /home/conda/feedstock_root/build_artifacts/broken\r\nTESTS FAILED: hydra-core-1.2.0-pyhd8ed1ab_0.tar.bz2\r\n```\r\n\r\n## Expected Behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\nTests pass\r\n\r\n## System information\r\n- **Hydra Version** : 1.2.0\r\n- **Python version** : 3.10.5\r\n- **Virtual environment type and version** : conda env\r\n- **Operating system** : Linux\r\n\r\n## Additional context\r\n\r\n~~This currently blocks the conda-forge feedstock update at https://github.com/conda-forge/hydra-core-feedstock/pull/19.\r\nWorkaround suggestions for this PR (preferably without file changes) are welcome.~~\r\nUpdate: Got the tests to pass by adding the `-W ignore::DeprecationWarning` flag to the pytest args. With this workaround I have completed the feedstock update and hydra-core 1.2 should be available from conda-forge in a few minutes.\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport setuptools # isort:skip # noqa\nimport distutils # isort:skip # noqa\n", "path": "build_helpers/__init__.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport codecs\nimport distutils.log\nimport errno\nimport os\nimport re\nimport shutil\nimport subprocess\nfrom os.path import abspath, basename, dirname, exists, isdir, join\nfrom typing import List, Optional\n\nfrom setuptools import Command\nfrom setuptools.command import build_py, develop, sdist\n\n\ndef find_version(*file_paths: str) -> str:\n with codecs.open(os.path.join(*file_paths), \"r\") as fp:\n version_file = fp.read()\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\ndef matches(patterns: List[str], string: str) -> bool:\n string = string.replace(\"\\\\\", \"/\")\n for pattern in patterns:\n if re.match(pattern, string):\n return True\n return False\n\n\ndef find_(\n root: str,\n rbase: str,\n include_files: List[str],\n include_dirs: List[str],\n excludes: List[str],\n scan_exclude: List[str],\n) -> List[str]:\n files = []\n scan_root = os.path.join(root, rbase)\n with os.scandir(scan_root) as it:\n for entry in it:\n path = os.path.join(rbase, entry.name)\n if matches(scan_exclude, path):\n continue\n\n if entry.is_dir():\n if matches(include_dirs, path):\n if not matches(excludes, path):\n files.append(path)\n else:\n ret = find_(\n root=root,\n rbase=path,\n include_files=include_files,\n include_dirs=include_dirs,\n excludes=excludes,\n scan_exclude=scan_exclude,\n )\n files.extend(ret)\n else:\n if matches(include_files, path) and not matches(excludes, path):\n files.append(path)\n\n return files\n\n\ndef find(\n root: str,\n include_files: List[str],\n include_dirs: List[str],\n excludes: List[str],\n scan_exclude: Optional[List[str]] = None,\n) -> List[str]:\n if scan_exclude is None:\n scan_exclude = []\n return find_(\n root=root,\n rbase=\"\",\n include_files=include_files,\n include_dirs=include_dirs,\n excludes=excludes,\n scan_exclude=scan_exclude,\n )\n\n\nclass CleanCommand(Command): # type: ignore\n \"\"\"\n Our custom command to clean out junk files.\n \"\"\"\n\n description = \"Cleans out generated and junk files we don't want in the repo\"\n dry_run: bool\n user_options: List[str] = []\n\n def run(self) -> None:\n files = find(\n \".\",\n include_files=[\"^hydra/grammar/gen/.*\"],\n include_dirs=[\n \"\\\\.egg-info$\",\n \"^.pytest_cache$\",\n \".*/__pycache__$\",\n \".*/multirun$\",\n \".*/outputs$\",\n \"^build$\",\n ],\n scan_exclude=[\"^.git$\", \"^.nox/.*$\", \"^website/.*$\"],\n excludes=[\".*\\\\.gitignore$\"],\n )\n\n if self.dry_run:\n print(\"Would clean up the following files and dirs\")\n print(\"\\n\".join(files))\n else:\n for f in files:\n if exists(f):\n if isdir(f):\n shutil.rmtree(f, ignore_errors=True)\n else:\n os.unlink(f)\n\n def initialize_options(self) -> None:\n pass\n\n def finalize_options(self) -> None:\n pass\n\n\ndef run_antlr(cmd: Command) -> None:\n try:\n cmd.announce(\"Generating parsers with antlr4\", level=distutils.log.INFO)\n cmd.run_command(\"antlr\")\n except OSError as e:\n if e.errno == errno.ENOENT:\n msg = f\"| Unable to generate parsers: {e} |\"\n msg = \"=\" * len(msg) + \"\\n\" + msg + \"\\n\" + \"=\" * len(msg)\n cmd.announce(f\"{msg}\", level=distutils.log.FATAL)\n exit(1)\n else:\n raise\n\n\nclass BuildPyCommand(build_py.build_py):\n def run(self) -> None:\n if not self.dry_run:\n self.run_command(\"clean\")\n run_antlr(self)\n build_py.build_py.run(self)\n\n\nclass Develop(develop.develop):\n def run(self) -> None: # type: 
ignore\n if not self.dry_run:\n run_antlr(self)\n develop.develop.run(self)\n\n\nclass SDistCommand(sdist.sdist):\n def run(self) -> None:\n if not self.dry_run: # type: ignore\n self.run_command(\"clean\")\n run_antlr(self)\n sdist.sdist.run(self)\n\n\nclass ANTLRCommand(Command): # type: ignore\n \"\"\"Generate parsers using ANTLR.\"\"\"\n\n description = \"Run ANTLR\"\n user_options: List[str] = []\n\n def run(self) -> None:\n \"\"\"Run command.\"\"\"\n root_dir = abspath(dirname(__file__))\n project_root = abspath(dirname(basename(__file__)))\n for grammar in [\n \"hydra/grammar/OverrideLexer.g4\",\n \"hydra/grammar/OverrideParser.g4\",\n ]:\n command = [\n \"java\",\n \"-jar\",\n join(root_dir, \"bin/antlr-4.9.3-complete.jar\"),\n \"-Dlanguage=Python3\",\n \"-o\",\n join(project_root, \"hydra/grammar/gen/\"),\n \"-Xexact-output-dir\",\n \"-visitor\",\n join(project_root, grammar),\n ]\n\n self.announce(\n f\"Generating parser for Python3: {command}\", level=distutils.log.INFO\n )\n\n subprocess.check_call(command)\n\n def initialize_options(self) -> None:\n pass\n\n def finalize_options(self) -> None:\n pass\n", "path": "build_helpers/build_helpers.py"}]}
| 3,298 | 473 |
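For reference, the accepted fix in the entry above boils down to swapping distutils' logging shim for the standard-library `logging` module. A minimal sketch of that same pattern follows; the function names and messages are placeholders chosen for illustration, not code taken from the hydra repository.

```python
import logging

# Module-level logger, mirroring the `log = logging.getLogger(__name__)` line the diff adds.
log = logging.getLogger(__name__)

def announce_generation(command):
    # INFO-level message, replacing cmd.announce(..., level=distutils.log.INFO).
    log.info("Generating parsers with antlr4: %s", command)

def announce_failure(message):
    # CRITICAL replaces the old distutils.log.FATAL announcement.
    log.critical(message)
```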
gh_patches_debug_10172
|
rasdani/github-patches
|
git_diff
|
oppia__oppia-8691
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove gulp from setup entirely.
To continue consolidating our setup process to a Python stack, gulp needs to be removed. Currently gulp only provides watch functionality that starts build processes when certain directories change. Our dependency on npm already includes access to npm-watch, which can be used to replace gulp.
</issue>
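As a rough, hedged sketch of where this leads (illustrative only — the `common` helpers and paths come from `scripts/start.py`, shown below), the dev-server startup can lean on webpack's own `--watch` flag instead of spawning a separate gulp watcher:

```python
import os
import subprocess
import time

from scripts import common  # assumed import path; inside the package it is `from . import common`

background_processes = []

# Webpack already watches the source tree via --watch, so no separate gulp process is needed.
background_processes.append(subprocess.Popen([
    common.NODE_BIN_PATH,
    os.path.join(common.NODE_MODULES_PATH, 'webpack', 'bin', 'webpack.js'),
    '--config', 'webpack.dev.config.ts', '--watch']))

# Give webpack a few seconds to finish its initial compilation.
time.sleep(10)
```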
<code>
[start of scripts/start.py]
1 # Copyright 2019 The Oppia Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the 'License');
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an 'AS-IS' BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """This script starts up a development server running Oppia. It installs any
16 missing third-party dependencies and starts up a local GAE development
17 server.
18 """
19
20 from __future__ import absolute_import # pylint: disable=import-only-modules
21 from __future__ import unicode_literals # pylint: disable=import-only-modules
22
23 import argparse
24 import atexit
25 import fileinput
26 import os
27 import re
28 import subprocess
29 import time
30
31 # Install third party libraries before importing other files.
32 from . import install_third_party_libs
33 install_third_party_libs.main()
34
35 # pylint: disable=wrong-import-position
36 import python_utils # isort:skip
37
38 from . import build # isort:skip
39 from . import common # isort:skip
40 # pylint: enable=wrong-import-position
41
42 _PARSER = argparse.ArgumentParser(description="""
43 Run the script from the oppia root folder:
44 python -m scripts.start
45 Note that the root folder MUST be named 'oppia'.
46 """)
47
48 _PARSER.add_argument(
49 '--save_datastore',
50 help='optional; if specified, does not clear the datastore.',
51 action='store_true')
52 _PARSER.add_argument(
53 '--enable_console',
54 help='optional; if specified, enables console.',
55 action='store_true')
56 _PARSER.add_argument(
57 '--prod_env',
58 help='optional; if specified, runs Oppia in a production environment.',
59 action='store_true')
60 _PARSER.add_argument(
61 '--no_browser',
62 help='optional; if specified, does not open a browser.',
63 action='store_true')
64 _PARSER.add_argument(
65 '--no_auto_restart',
66 help=(
67 'optional; if specified, does not automatically restart when files are '
68 'changed.'),
69 action='store_true')
70
71 PORT_NUMBER_FOR_GAE_SERVER = 8181
72
73
74 def cleanup():
75 """Function for waiting for the servers to go down."""
76 common.print_each_string_after_two_new_lines([
77 'INFORMATION',
78 'Cleaning up the servers.'])
79 while common.is_port_open(PORT_NUMBER_FOR_GAE_SERVER):
80 time.sleep(1)
81
82
83 def main(args=None):
84 """Starts up a development server running Oppia."""
85 parsed_args = _PARSER.parse_args(args=args)
86
87 # Runs cleanup function on exit.
88 atexit.register(cleanup)
89
90 # Check that there isn't a server already running.
91 if common.is_port_open(PORT_NUMBER_FOR_GAE_SERVER):
92 common.print_each_string_after_two_new_lines([
93 'WARNING',
94 'Could not start new server. There is already an existing server',
95 'running at port %s.'
96 % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)])
97
98 clear_datastore_arg = (
99 '' if parsed_args.save_datastore else '--clear_datastore=true')
100 enable_console_arg = (
101 '--enable_console=true' if parsed_args.enable_console else '')
102 no_auto_restart = (
103 '--automatic_restart=no' if parsed_args.no_auto_restart else '')
104
105 if parsed_args.prod_env:
106 constants_env_variable = '"DEV_MODE": false'
107 for line in fileinput.input(
108 files=[os.path.join('assets', 'constants.ts')], inplace=True):
109 # Inside this loop the STDOUT will be redirected to the file,
110 # constants.ts. The end='' is needed to avoid double line breaks.
111 python_utils.PRINT(
112 re.sub(
113 r'"DEV_MODE": .*', constants_env_variable, line), end='')
114 build.main(args=['--prod_env'])
115 app_yaml_filepath = 'app.yaml'
116 else:
117 constants_env_variable = '"DEV_MODE": true'
118 for line in fileinput.input(
119 files=[os.path.join('assets', 'constants.ts')], inplace=True):
120 # Inside this loop the STDOUT will be redirected to the file,
121 # constants.ts. The end='' is needed to avoid double line breaks.
122 python_utils.PRINT(
123 re.sub(
124 r'"DEV_MODE": .*', constants_env_variable, line), end='')
125 build.main(args=[])
126 app_yaml_filepath = 'app_dev.yaml'
127
128 # Set up a local dev instance.
129 # TODO(sll): do this in a new shell.
130 # To turn emailing on, add the option '--enable_sendmail=yes' and change the
131 # relevant settings in feconf.py. Be careful with this -- you do not want to
132 # spam people accidentally.
133 background_processes = []
134 if not parsed_args.prod_env:
135 background_processes.append(subprocess.Popen([
136 common.NODE_BIN_PATH,
137 os.path.join(common.NODE_MODULES_PATH, 'gulp', 'bin', 'gulp.js'),
138 'watch']))
139
140 # In prod mode webpack is launched through scripts/build.py
141 python_utils.PRINT('Compiling webpack...')
142 background_processes.append(subprocess.Popen([
143 common.NODE_BIN_PATH,
144 os.path.join(
145 common.NODE_MODULES_PATH, 'webpack', 'bin', 'webpack.js'),
146 '--config', 'webpack.dev.config.ts', '--watch']))
147 # Give webpack few seconds to do the initial compilation.
148 time.sleep(10)
149
150 python_utils.PRINT('Starting GAE development server')
151 background_processes.append(subprocess.Popen(
152 'python %s/dev_appserver.py %s %s %s --admin_host 0.0.0.0 --admin_port '
153 '8000 --host 0.0.0.0 --port %s --skip_sdk_update_check true %s' % (
154 common.GOOGLE_APP_ENGINE_HOME, clear_datastore_arg,
155 enable_console_arg, no_auto_restart,
156 python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER),
157 app_yaml_filepath), shell=True))
158
159 # Wait for the servers to come up.
160 while not common.is_port_open(PORT_NUMBER_FOR_GAE_SERVER):
161 time.sleep(1)
162
163 # Launch a browser window.
164 if common.is_linux_os() and not parsed_args.no_browser:
165 detect_virtualbox_pattern = re.compile('.*VBOX.*')
166 if list(filter(
167 detect_virtualbox_pattern.match,
168 os.listdir('/dev/disk/by-id/'))):
169 common.print_each_string_after_two_new_lines([
170 'INFORMATION',
171 'Setting up a local development server. You can access this '
172 'server',
173 'by navigating to localhost:%s in a browser window.'
174 % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)])
175 else:
176 common.print_each_string_after_two_new_lines([
177 'INFORMATION',
178 'Setting up a local development server at localhost:%s. '
179 % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER),
180 'Opening a default browser window pointing to this server'])
181 time.sleep(5)
182 background_processes.append(
183 subprocess.Popen([
184 'xdg-open', 'http://localhost:%s/'
185 % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)]))
186 elif common.is_mac_os() and not parsed_args.no_browser:
187 common.print_each_string_after_two_new_lines([
188 'INFORMATION',
189 'Setting up a local development server at localhost:%s. '
190 % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER),
191 'Opening a default browser window pointing to this server.'])
192 time.sleep(5)
193 background_processes.append(
194 subprocess.Popen([
195 'open', 'http://localhost:%s/'
196 % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)]))
197 else:
198 common.print_each_string_after_two_new_lines([
199 'INFORMATION',
200 'Setting up a local development server. You can access this server',
201 'by navigating to localhost:%s in a browser window.'
202 % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)])
203
204 python_utils.PRINT('Done!')
205
206 for process in background_processes:
207 process.wait()
208
209
210 if __name__ == '__main__':
211 main()
212
[end of scripts/start.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts/start.py b/scripts/start.py
--- a/scripts/start.py
+++ b/scripts/start.py
@@ -132,11 +132,6 @@
# spam people accidentally.
background_processes = []
if not parsed_args.prod_env:
- background_processes.append(subprocess.Popen([
- common.NODE_BIN_PATH,
- os.path.join(common.NODE_MODULES_PATH, 'gulp', 'bin', 'gulp.js'),
- 'watch']))
-
# In prod mode webpack is launched through scripts/build.py
python_utils.PRINT('Compiling webpack...')
background_processes.append(subprocess.Popen([
|
{"golden_diff": "diff --git a/scripts/start.py b/scripts/start.py\n--- a/scripts/start.py\n+++ b/scripts/start.py\n@@ -132,11 +132,6 @@\n # spam people accidentally.\n background_processes = []\n if not parsed_args.prod_env:\n- background_processes.append(subprocess.Popen([\n- common.NODE_BIN_PATH,\n- os.path.join(common.NODE_MODULES_PATH, 'gulp', 'bin', 'gulp.js'),\n- 'watch']))\n-\n # In prod mode webpack is launched through scripts/build.py\n python_utils.PRINT('Compiling webpack...')\n background_processes.append(subprocess.Popen([\n", "issue": "Remove gulp from setup entirely.\nTo continue consolidating our setup process to a python stack, gulp needs to be removed. Currently gulp only provides a watch functionality that starts build processes if some directories are changed. Our dependency on npm already includes access to npm-watch, which can be used to replace gulp.\nRemove gulp from setup entirely.\nTo continue consolidating our setup process to a python stack, gulp needs to be removed. Currently gulp only provides a watch functionality that starts build processes if some directories are changed. Our dependency on npm already includes access to npm-watch, which can be used to replace gulp.\n", "before_files": [{"content": "# Copyright 2019 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS-IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script starts up a development server running Oppia. It installs any\nmissing third-party dependencies and starts up a local GAE development\nserver.\n\"\"\"\n\nfrom __future__ import absolute_import # pylint: disable=import-only-modules\nfrom __future__ import unicode_literals # pylint: disable=import-only-modules\n\nimport argparse\nimport atexit\nimport fileinput\nimport os\nimport re\nimport subprocess\nimport time\n\n# Install third party libraries before importing other files.\nfrom . import install_third_party_libs\ninstall_third_party_libs.main()\n\n# pylint: disable=wrong-import-position\nimport python_utils # isort:skip\n\nfrom . import build # isort:skip\nfrom . 
import common # isort:skip\n# pylint: enable=wrong-import-position\n\n_PARSER = argparse.ArgumentParser(description=\"\"\"\nRun the script from the oppia root folder:\n python -m scripts.start\nNote that the root folder MUST be named 'oppia'.\n\"\"\")\n\n_PARSER.add_argument(\n '--save_datastore',\n help='optional; if specified, does not clear the datastore.',\n action='store_true')\n_PARSER.add_argument(\n '--enable_console',\n help='optional; if specified, enables console.',\n action='store_true')\n_PARSER.add_argument(\n '--prod_env',\n help='optional; if specified, runs Oppia in a production environment.',\n action='store_true')\n_PARSER.add_argument(\n '--no_browser',\n help='optional; if specified, does not open a browser.',\n action='store_true')\n_PARSER.add_argument(\n '--no_auto_restart',\n help=(\n 'optional; if specified, does not automatically restart when files are '\n 'changed.'),\n action='store_true')\n\nPORT_NUMBER_FOR_GAE_SERVER = 8181\n\n\ndef cleanup():\n \"\"\"Function for waiting for the servers to go down.\"\"\"\n common.print_each_string_after_two_new_lines([\n 'INFORMATION',\n 'Cleaning up the servers.'])\n while common.is_port_open(PORT_NUMBER_FOR_GAE_SERVER):\n time.sleep(1)\n\n\ndef main(args=None):\n \"\"\"Starts up a development server running Oppia.\"\"\"\n parsed_args = _PARSER.parse_args(args=args)\n\n # Runs cleanup function on exit.\n atexit.register(cleanup)\n\n # Check that there isn't a server already running.\n if common.is_port_open(PORT_NUMBER_FOR_GAE_SERVER):\n common.print_each_string_after_two_new_lines([\n 'WARNING',\n 'Could not start new server. There is already an existing server',\n 'running at port %s.'\n % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)])\n\n clear_datastore_arg = (\n '' if parsed_args.save_datastore else '--clear_datastore=true')\n enable_console_arg = (\n '--enable_console=true' if parsed_args.enable_console else '')\n no_auto_restart = (\n '--automatic_restart=no' if parsed_args.no_auto_restart else '')\n\n if parsed_args.prod_env:\n constants_env_variable = '\"DEV_MODE\": false'\n for line in fileinput.input(\n files=[os.path.join('assets', 'constants.ts')], inplace=True):\n # Inside this loop the STDOUT will be redirected to the file,\n # constants.ts. The end='' is needed to avoid double line breaks.\n python_utils.PRINT(\n re.sub(\n r'\"DEV_MODE\": .*', constants_env_variable, line), end='')\n build.main(args=['--prod_env'])\n app_yaml_filepath = 'app.yaml'\n else:\n constants_env_variable = '\"DEV_MODE\": true'\n for line in fileinput.input(\n files=[os.path.join('assets', 'constants.ts')], inplace=True):\n # Inside this loop the STDOUT will be redirected to the file,\n # constants.ts. The end='' is needed to avoid double line breaks.\n python_utils.PRINT(\n re.sub(\n r'\"DEV_MODE\": .*', constants_env_variable, line), end='')\n build.main(args=[])\n app_yaml_filepath = 'app_dev.yaml'\n\n # Set up a local dev instance.\n # TODO(sll): do this in a new shell.\n # To turn emailing on, add the option '--enable_sendmail=yes' and change the\n # relevant settings in feconf.py. 
Be careful with this -- you do not want to\n # spam people accidentally.\n background_processes = []\n if not parsed_args.prod_env:\n background_processes.append(subprocess.Popen([\n common.NODE_BIN_PATH,\n os.path.join(common.NODE_MODULES_PATH, 'gulp', 'bin', 'gulp.js'),\n 'watch']))\n\n # In prod mode webpack is launched through scripts/build.py\n python_utils.PRINT('Compiling webpack...')\n background_processes.append(subprocess.Popen([\n common.NODE_BIN_PATH,\n os.path.join(\n common.NODE_MODULES_PATH, 'webpack', 'bin', 'webpack.js'),\n '--config', 'webpack.dev.config.ts', '--watch']))\n # Give webpack few seconds to do the initial compilation.\n time.sleep(10)\n\n python_utils.PRINT('Starting GAE development server')\n background_processes.append(subprocess.Popen(\n 'python %s/dev_appserver.py %s %s %s --admin_host 0.0.0.0 --admin_port '\n '8000 --host 0.0.0.0 --port %s --skip_sdk_update_check true %s' % (\n common.GOOGLE_APP_ENGINE_HOME, clear_datastore_arg,\n enable_console_arg, no_auto_restart,\n python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER),\n app_yaml_filepath), shell=True))\n\n # Wait for the servers to come up.\n while not common.is_port_open(PORT_NUMBER_FOR_GAE_SERVER):\n time.sleep(1)\n\n # Launch a browser window.\n if common.is_linux_os() and not parsed_args.no_browser:\n detect_virtualbox_pattern = re.compile('.*VBOX.*')\n if list(filter(\n detect_virtualbox_pattern.match,\n os.listdir('/dev/disk/by-id/'))):\n common.print_each_string_after_two_new_lines([\n 'INFORMATION',\n 'Setting up a local development server. You can access this '\n 'server',\n 'by navigating to localhost:%s in a browser window.'\n % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)])\n else:\n common.print_each_string_after_two_new_lines([\n 'INFORMATION',\n 'Setting up a local development server at localhost:%s. '\n % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER),\n 'Opening a default browser window pointing to this server'])\n time.sleep(5)\n background_processes.append(\n subprocess.Popen([\n 'xdg-open', 'http://localhost:%s/'\n % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)]))\n elif common.is_mac_os() and not parsed_args.no_browser:\n common.print_each_string_after_two_new_lines([\n 'INFORMATION',\n 'Setting up a local development server at localhost:%s. '\n % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER),\n 'Opening a default browser window pointing to this server.'])\n time.sleep(5)\n background_processes.append(\n subprocess.Popen([\n 'open', 'http://localhost:%s/'\n % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)]))\n else:\n common.print_each_string_after_two_new_lines([\n 'INFORMATION',\n 'Setting up a local development server. You can access this server',\n 'by navigating to localhost:%s in a browser window.'\n % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)])\n\n python_utils.PRINT('Done!')\n\n for process in background_processes:\n process.wait()\n\n\nif __name__ == '__main__':\n main()\n", "path": "scripts/start.py"}]}
| 2,964 | 131 |
gh_patches_debug_37817
|
rasdani/github-patches
|
git_diff
|
Rapptz__discord.py-2162
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tasks] Run-time changing of the sleep interval
### The Problem
No way to change the sleep interval at run-time.
### The Ideal Solution
Something like `task.change_interval(seconds=20.0)`.
It should apply on the next loop. If someone wants to do it right away, regardless of the current loop, they can cancel it, change the interval, then start it again.
### The Current Solution
None
</issue>
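A hedged sketch of what such a method could look like on the `Loop` class is below (the change that was actually merged appears in the golden diff further down this entry; the attribute names mirror `discord/ext/tasks/__init__.py` as listed next):

```python
MAX_ASYNCIO_SECONDS = 3456000  # same asyncio-imposed cap the module already defines

class Loop:
    # ... existing attributes: seconds, minutes, hours, _sleep ...

    def change_interval(self, *, seconds=0, minutes=0, hours=0):
        """Change the sleep interval; it takes effect on the next iteration."""
        sleep = seconds + (minutes * 60.0) + (hours * 3600.0)
        if sleep >= MAX_ASYNCIO_SECONDS:
            raise ValueError(
                'Total number of seconds exceeds asyncio imposed limit of '
                '{0} seconds.'.format(MAX_ASYNCIO_SECONDS))
        if sleep < 0:
            raise ValueError('Total number of seconds cannot be less than zero.')
        # Only the stored values change; the running task reads _sleep on its next iteration.
        self._sleep = sleep
        self.seconds = seconds
        self.minutes = minutes
        self.hours = hours
```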
<code>
[start of discord/ext/tasks/__init__.py]
1 import asyncio
2 import aiohttp
3 import websockets
4 import discord
5 import inspect
6 import logging
7
8 from discord.backoff import ExponentialBackoff
9
10 MAX_ASYNCIO_SECONDS = 3456000
11
12 log = logging.getLogger(__name__)
13
14 class Loop:
15 """A background task helper that abstracts the loop and reconnection logic for you.
16
17 The main interface to create this is through :func:`loop`.
18 """
19 def __init__(self, coro, seconds, hours, minutes, count, reconnect, loop):
20 self.coro = coro
21 self.seconds = seconds
22 self.hours = hours
23 self.minutes = minutes
24 self.reconnect = reconnect
25 self.loop = loop or asyncio.get_event_loop()
26 self.count = count
27 self._current_loop = 0
28 self._task = None
29 self._injected = None
30 self._valid_exception = (
31 OSError,
32 discord.HTTPException,
33 discord.GatewayNotFound,
34 discord.ConnectionClosed,
35 aiohttp.ClientError,
36 asyncio.TimeoutError,
37 websockets.InvalidHandshake,
38 websockets.WebSocketProtocolError,
39 )
40
41 self._before_loop = None
42 self._after_loop = None
43 self._is_being_cancelled = False
44 self._has_failed = False
45 self._stop_next_iteration = False
46
47 if self.count is not None and self.count <= 0:
48 raise ValueError('count must be greater than 0 or None.')
49
50 self._sleep = sleep = self.seconds + (self.minutes * 60.0) + (self.hours * 3600.0)
51 if sleep >= MAX_ASYNCIO_SECONDS:
52 fmt = 'Total number of seconds exceeds asyncio imposed limit of {0} seconds.'
53 raise ValueError(fmt.format(MAX_ASYNCIO_SECONDS))
54
55 if sleep < 0:
56 raise ValueError('Total number of seconds cannot be less than zero.')
57
58 if not inspect.iscoroutinefunction(self.coro):
59 raise TypeError('Expected coroutine function, not {0.__name__!r}.'.format(type(self.coro)))
60
61 async def _call_loop_function(self, name):
62 coro = getattr(self, '_' + name)
63 if coro is None:
64 return
65
66 if self._injected is not None:
67 await coro(self._injected)
68 else:
69 await coro()
70
71 async def _loop(self, *args, **kwargs):
72 backoff = ExponentialBackoff()
73 await self._call_loop_function('before_loop')
74 try:
75 while True:
76 try:
77 await self.coro(*args, **kwargs)
78 except self._valid_exception as exc:
79 if not self.reconnect:
80 raise
81 await asyncio.sleep(backoff.delay())
82 else:
83 if self._stop_next_iteration:
84 return
85 self._current_loop += 1
86 if self._current_loop == self.count:
87 break
88
89 await asyncio.sleep(self._sleep)
90 except asyncio.CancelledError:
91 self._is_being_cancelled = True
92 raise
93 except Exception:
94 self._has_failed = True
95 log.exception('Internal background task failed.')
96 raise
97 finally:
98 await self._call_loop_function('after_loop')
99 self._is_being_cancelled = False
100 self._current_loop = 0
101 self._stop_next_iteration = False
102 self._has_failed = False
103
104 def __get__(self, obj, objtype):
105 if obj is None:
106 return self
107 self._injected = obj
108 return self
109
110 @property
111 def current_loop(self):
112 """:class:`int`: The current iteration of the loop."""
113 return self._current_loop
114
115 def start(self, *args, **kwargs):
116 r"""Starts the internal task in the event loop.
117
118 Parameters
119 ------------
120 \*args
121 The arguments to to use.
122 \*\*kwargs
123 The keyword arguments to use.
124
125 Raises
126 --------
127 RuntimeError
128 A task has already been launched and is running.
129
130 Returns
131 ---------
132 :class:`asyncio.Task`
133 The task that has been created.
134 """
135
136 if self._task is not None and not self._task.done():
137 raise RuntimeError('Task is already launched and is not completed.')
138
139 if self._injected is not None:
140 args = (self._injected, *args)
141
142 self._task = self.loop.create_task(self._loop(*args, **kwargs))
143 return self._task
144
145 def stop(self):
146 r"""Gracefully stops the task from running.
147
148 Unlike :meth:`cancel`\, this allows the task to finish its
149 current iteration before gracefully exiting.
150
151 .. note::
152
153 If the internal function raises an error that can be
154 handled before finishing then it will retry until
155 it succeeds.
156
157 If this is undesirable, either remove the error handling
158 before stopping via :meth:`clear_exception_types` or
159 use :meth:`cancel` instead.
160
161 .. versionadded:: 1.2.0
162 """
163 if self._task and not self._task.done():
164 self._stop_next_iteration = True
165
166 def _can_be_cancelled(self):
167 return not self._is_being_cancelled and self._task and not self._task.done()
168
169 def cancel(self):
170 """Cancels the internal task, if it is running."""
171 if self._can_be_cancelled():
172 self._task.cancel()
173
174 def restart(self, *args, **kwargs):
175 r"""A convenience method to restart the internal task.
176
177 .. note::
178
179 Due to the way this function works, the task is not
180 returned like :meth:`start`.
181
182 Parameters
183 ------------
184 \*args
185 The arguments to to use.
186 \*\*kwargs
187 The keyword arguments to use.
188 """
189
190 def restart_when_over(fut, *, args=args, kwargs=kwargs):
191 self._task.remove_done_callback(restart_when_over)
192 self.start(*args, **kwargs)
193
194 if self._can_be_cancelled():
195 self._task.add_done_callback(restart_when_over)
196 self._task.cancel()
197
198 def add_exception_type(self, exc):
199 r"""Adds an exception type to be handled during the reconnect logic.
200
201 By default the exception types handled are those handled by
202 :meth:`discord.Client.connect`\, which includes a lot of internet disconnection
203 errors.
204
205 This function is useful if you're interacting with a 3rd party library that
206 raises its own set of exceptions.
207
208 Parameters
209 ------------
210 exc: Type[:class:`BaseException`]
211 The exception class to handle.
212
213 Raises
214 --------
215 TypeError
216 The exception passed is either not a class or not inherited from :class:`BaseException`.
217 """
218
219 if not inspect.isclass(exc):
220 raise TypeError('{0!r} must be a class.'.format(exc))
221 if not issubclass(exc, BaseException):
222 raise TypeError('{0!r} must inherit from BaseException.'.format(exc))
223
224 self._valid_exception = (*self._valid_exception, exc)
225
226 def clear_exception_types(self):
227 """Removes all exception types that are handled.
228
229 .. note::
230
231 This operation obviously cannot be undone!
232 """
233 self._valid_exception = tuple()
234
235 def remove_exception_type(self, exc):
236 """Removes an exception type from being handled during the reconnect logic.
237
238 Parameters
239 ------------
240 exc: Type[:class:`BaseException`]
241 The exception class to handle.
242
243 Returns
244 ---------
245 :class:`bool`
246 Whether it was successfully removed.
247 """
248 old_length = len(self._valid_exception)
249 self._valid_exception = tuple(x for x in self._valid_exception if x is not exc)
250 return len(self._valid_exception) != old_length
251
252 def get_task(self):
253 """Optional[:class:`asyncio.Task`]: Fetches the internal task or ``None`` if there isn't one running."""
254 return self._task
255
256 def is_being_cancelled(self):
257 """:class:`bool`: Whether the task is being cancelled."""
258 return self._is_being_cancelled
259
260 def failed(self):
261 """:class:`bool`: Whether the internal task has failed.
262
263 .. versionadded:: 1.2.0
264 """
265 return self._has_failed
266
267 def before_loop(self, coro):
268 """A decorator that registers a coroutine to be called before the loop starts running.
269
270 This is useful if you want to wait for some bot state before the loop starts,
271 such as :meth:`discord.Client.wait_until_ready`.
272
273 The coroutine must take no arguments (except ``self`` in a class context).
274
275 Parameters
276 ------------
277 coro: :ref:`coroutine <coroutine>`
278 The coroutine to register before the loop runs.
279
280 Raises
281 -------
282 TypeError
283 The function was not a coroutine.
284 """
285
286 if not inspect.iscoroutinefunction(coro):
287 raise TypeError('Expected coroutine function, received {0.__name__!r}.'.format(type(coro)))
288
289 self._before_loop = coro
290 return coro
291
292 def after_loop(self, coro):
293 """A decorator that register a coroutine to be called after the loop finished running.
294
295 The coroutine must take no arguments (except ``self`` in a class context).
296
297 .. note::
298
299 This coroutine is called even during cancellation. If it is desirable
300 to tell apart whether something was cancelled or not, check to see
301 whether :meth:`is_being_cancelled` is ``True`` or not.
302
303 Parameters
304 ------------
305 coro: :ref:`coroutine <coroutine>`
306 The coroutine to register after the loop finishes.
307
308 Raises
309 -------
310 TypeError
311 The function was not a coroutine.
312 """
313
314 if not inspect.iscoroutinefunction(coro):
315 raise TypeError('Expected coroutine function, received {0.__name__!r}.'.format(type(coro)))
316
317 self._after_loop = coro
318 return coro
319
320 def loop(*, seconds=0, minutes=0, hours=0, count=None, reconnect=True, loop=None):
321 """A decorator that schedules a task in the background for you with
322 optional reconnect logic.
323
324 Parameters
325 ------------
326 seconds: :class:`float`
327 The number of seconds between every iteration.
328 minutes: :class:`float`
329 The number of minutes between every iteration.
330 hours: :class:`float`
331 The number of hours between every iteration.
332 count: Optional[:class:`int`]
333 The number of loops to do, ``None`` if it should be an
334 infinite loop.
335 reconnect: :class:`bool`
336 Whether to handle errors and restart the task
337 using an exponential back-off algorithm similar to the
338 one used in :meth:`discord.Client.connect`.
339 loop: :class:`asyncio.AbstractEventLoop`
340 The loop to use to register the task, if not given
341 defaults to :func:`asyncio.get_event_loop`.
342
343 Raises
344 --------
345 ValueError
346 An invalid value was given.
347 TypeError
348 The function was not a coroutine.
349
350 Returns
351 ---------
352 :class:`Loop`
353 The loop helper that handles the background task.
354 """
355 def decorator(func):
356 return Loop(func, seconds=seconds, minutes=minutes, hours=hours,
357 count=count, reconnect=reconnect, loop=loop)
358 return decorator
359
[end of discord/ext/tasks/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/discord/ext/tasks/__init__.py b/discord/ext/tasks/__init__.py
--- a/discord/ext/tasks/__init__.py
+++ b/discord/ext/tasks/__init__.py
@@ -18,9 +18,6 @@
"""
def __init__(self, coro, seconds, hours, minutes, count, reconnect, loop):
self.coro = coro
- self.seconds = seconds
- self.hours = hours
- self.minutes = minutes
self.reconnect = reconnect
self.loop = loop or asyncio.get_event_loop()
self.count = count
@@ -47,13 +44,7 @@
if self.count is not None and self.count <= 0:
raise ValueError('count must be greater than 0 or None.')
- self._sleep = sleep = self.seconds + (self.minutes * 60.0) + (self.hours * 3600.0)
- if sleep >= MAX_ASYNCIO_SECONDS:
- fmt = 'Total number of seconds exceeds asyncio imposed limit of {0} seconds.'
- raise ValueError(fmt.format(MAX_ASYNCIO_SECONDS))
-
- if sleep < 0:
- raise ValueError('Total number of seconds cannot be less than zero.')
+ self.change_interval(seconds=seconds, minutes=minutes, hours=hours)
if not inspect.iscoroutinefunction(self.coro):
raise TypeError('Expected coroutine function, not {0.__name__!r}.'.format(type(self.coro)))
@@ -317,6 +308,42 @@
self._after_loop = coro
return coro
+ def change_interval(self, *, seconds=0, minutes=0, hours=0):
+ """Changes the interval for the sleep time.
+
+ .. note::
+
+ This only applies on the next loop iteration. If it is desirable for the change of interval
+ to be applied right away, cancel the task with :meth:`cancel`.
+
+ Parameters
+ ------------
+ seconds: :class:`float`
+ The number of seconds between every iteration.
+ minutes: :class:`float`
+ The number of minutes between every iteration.
+ hours: :class:`float`
+ The number of hours between every iteration.
+
+ Raises
+ -------
+ ValueError
+ An invalid value was given.
+ """
+
+ sleep = seconds + (minutes * 60.0) + (hours * 3600.0)
+ if sleep >= MAX_ASYNCIO_SECONDS:
+ fmt = 'Total number of seconds exceeds asyncio imposed limit of {0} seconds.'
+ raise ValueError(fmt.format(MAX_ASYNCIO_SECONDS))
+
+ if sleep < 0:
+ raise ValueError('Total number of seconds cannot be less than zero.')
+
+ self._sleep = sleep
+ self.seconds = seconds
+ self.hours = hours
+ self.minutes = minutes
+
def loop(*, seconds=0, minutes=0, hours=0, count=None, reconnect=True, loop=None):
"""A decorator that schedules a task in the background for you with
optional reconnect logic.
|
{"golden_diff": "diff --git a/discord/ext/tasks/__init__.py b/discord/ext/tasks/__init__.py\n--- a/discord/ext/tasks/__init__.py\n+++ b/discord/ext/tasks/__init__.py\n@@ -18,9 +18,6 @@\n \"\"\"\n def __init__(self, coro, seconds, hours, minutes, count, reconnect, loop):\n self.coro = coro\n- self.seconds = seconds\n- self.hours = hours\n- self.minutes = minutes\n self.reconnect = reconnect\n self.loop = loop or asyncio.get_event_loop()\n self.count = count\n@@ -47,13 +44,7 @@\n if self.count is not None and self.count <= 0:\n raise ValueError('count must be greater than 0 or None.')\n \n- self._sleep = sleep = self.seconds + (self.minutes * 60.0) + (self.hours * 3600.0)\n- if sleep >= MAX_ASYNCIO_SECONDS:\n- fmt = 'Total number of seconds exceeds asyncio imposed limit of {0} seconds.'\n- raise ValueError(fmt.format(MAX_ASYNCIO_SECONDS))\n-\n- if sleep < 0:\n- raise ValueError('Total number of seconds cannot be less than zero.')\n+ self.change_interval(seconds=seconds, minutes=minutes, hours=hours)\n \n if not inspect.iscoroutinefunction(self.coro):\n raise TypeError('Expected coroutine function, not {0.__name__!r}.'.format(type(self.coro)))\n@@ -317,6 +308,42 @@\n self._after_loop = coro\n return coro\n \n+ def change_interval(self, *, seconds=0, minutes=0, hours=0):\n+ \"\"\"Changes the interval for the sleep time.\n+\n+ .. note::\n+\n+ This only applies on the next loop iteration. If it is desirable for the change of interval\n+ to be applied right away, cancel the task with :meth:`cancel`.\n+\n+ Parameters\n+ ------------\n+ seconds: :class:`float`\n+ The number of seconds between every iteration.\n+ minutes: :class:`float`\n+ The number of minutes between every iteration.\n+ hours: :class:`float`\n+ The number of hours between every iteration.\n+\n+ Raises\n+ -------\n+ ValueError\n+ An invalid value was given.\n+ \"\"\"\n+\n+ sleep = seconds + (minutes * 60.0) + (hours * 3600.0)\n+ if sleep >= MAX_ASYNCIO_SECONDS:\n+ fmt = 'Total number of seconds exceeds asyncio imposed limit of {0} seconds.'\n+ raise ValueError(fmt.format(MAX_ASYNCIO_SECONDS))\n+\n+ if sleep < 0:\n+ raise ValueError('Total number of seconds cannot be less than zero.')\n+\n+ self._sleep = sleep\n+ self.seconds = seconds\n+ self.hours = hours\n+ self.minutes = minutes\n+\n def loop(*, seconds=0, minutes=0, hours=0, count=None, reconnect=True, loop=None):\n \"\"\"A decorator that schedules a task in the background for you with\n optional reconnect logic.\n", "issue": "[tasks] Run-time changing of the sleep interval\n### The Problem\r\n\r\nNo way to change the sleep interval at run-time.\r\n\r\n### The Ideal Solution\r\n\r\nSomething like `task.change_interval(seconds=20.0)`. \r\n\r\nIt should apply on the next loop. 
If someone wants to do it right away regardless of the current loop they can cancel it, change the interval, then start it again.\r\n\r\n### The Current Solution\r\n\r\nNone\n", "before_files": [{"content": "import asyncio\nimport aiohttp\nimport websockets\nimport discord\nimport inspect\nimport logging\n\nfrom discord.backoff import ExponentialBackoff\n\nMAX_ASYNCIO_SECONDS = 3456000\n\nlog = logging.getLogger(__name__)\n\nclass Loop:\n \"\"\"A background task helper that abstracts the loop and reconnection logic for you.\n\n The main interface to create this is through :func:`loop`.\n \"\"\"\n def __init__(self, coro, seconds, hours, minutes, count, reconnect, loop):\n self.coro = coro\n self.seconds = seconds\n self.hours = hours\n self.minutes = minutes\n self.reconnect = reconnect\n self.loop = loop or asyncio.get_event_loop()\n self.count = count\n self._current_loop = 0\n self._task = None\n self._injected = None\n self._valid_exception = (\n OSError,\n discord.HTTPException,\n discord.GatewayNotFound,\n discord.ConnectionClosed,\n aiohttp.ClientError,\n asyncio.TimeoutError,\n websockets.InvalidHandshake,\n websockets.WebSocketProtocolError,\n )\n\n self._before_loop = None\n self._after_loop = None\n self._is_being_cancelled = False\n self._has_failed = False\n self._stop_next_iteration = False\n\n if self.count is not None and self.count <= 0:\n raise ValueError('count must be greater than 0 or None.')\n\n self._sleep = sleep = self.seconds + (self.minutes * 60.0) + (self.hours * 3600.0)\n if sleep >= MAX_ASYNCIO_SECONDS:\n fmt = 'Total number of seconds exceeds asyncio imposed limit of {0} seconds.'\n raise ValueError(fmt.format(MAX_ASYNCIO_SECONDS))\n\n if sleep < 0:\n raise ValueError('Total number of seconds cannot be less than zero.')\n\n if not inspect.iscoroutinefunction(self.coro):\n raise TypeError('Expected coroutine function, not {0.__name__!r}.'.format(type(self.coro)))\n\n async def _call_loop_function(self, name):\n coro = getattr(self, '_' + name)\n if coro is None:\n return\n\n if self._injected is not None:\n await coro(self._injected)\n else:\n await coro()\n\n async def _loop(self, *args, **kwargs):\n backoff = ExponentialBackoff()\n await self._call_loop_function('before_loop')\n try:\n while True:\n try:\n await self.coro(*args, **kwargs)\n except self._valid_exception as exc:\n if not self.reconnect:\n raise\n await asyncio.sleep(backoff.delay())\n else:\n if self._stop_next_iteration:\n return\n self._current_loop += 1\n if self._current_loop == self.count:\n break\n\n await asyncio.sleep(self._sleep)\n except asyncio.CancelledError:\n self._is_being_cancelled = True\n raise\n except Exception:\n self._has_failed = True\n log.exception('Internal background task failed.')\n raise\n finally:\n await self._call_loop_function('after_loop')\n self._is_being_cancelled = False\n self._current_loop = 0\n self._stop_next_iteration = False\n self._has_failed = False\n\n def __get__(self, obj, objtype):\n if obj is None:\n return self\n self._injected = obj\n return self\n\n @property\n def current_loop(self):\n \"\"\":class:`int`: The current iteration of the loop.\"\"\"\n return self._current_loop\n\n def start(self, *args, **kwargs):\n r\"\"\"Starts the internal task in the event loop.\n\n Parameters\n ------------\n \\*args\n The arguments to to use.\n \\*\\*kwargs\n The keyword arguments to use.\n\n Raises\n --------\n RuntimeError\n A task has already been launched and is running.\n\n Returns\n ---------\n :class:`asyncio.Task`\n The task that has been 
created.\n \"\"\"\n\n if self._task is not None and not self._task.done():\n raise RuntimeError('Task is already launched and is not completed.')\n\n if self._injected is not None:\n args = (self._injected, *args)\n\n self._task = self.loop.create_task(self._loop(*args, **kwargs))\n return self._task\n\n def stop(self):\n r\"\"\"Gracefully stops the task from running.\n\n Unlike :meth:`cancel`\\, this allows the task to finish its\n current iteration before gracefully exiting.\n\n .. note::\n\n If the internal function raises an error that can be\n handled before finishing then it will retry until\n it succeeds.\n\n If this is undesirable, either remove the error handling\n before stopping via :meth:`clear_exception_types` or\n use :meth:`cancel` instead.\n\n .. versionadded:: 1.2.0\n \"\"\"\n if self._task and not self._task.done():\n self._stop_next_iteration = True\n\n def _can_be_cancelled(self):\n return not self._is_being_cancelled and self._task and not self._task.done()\n\n def cancel(self):\n \"\"\"Cancels the internal task, if it is running.\"\"\"\n if self._can_be_cancelled():\n self._task.cancel()\n\n def restart(self, *args, **kwargs):\n r\"\"\"A convenience method to restart the internal task.\n\n .. note::\n\n Due to the way this function works, the task is not\n returned like :meth:`start`.\n\n Parameters\n ------------\n \\*args\n The arguments to to use.\n \\*\\*kwargs\n The keyword arguments to use.\n \"\"\"\n\n def restart_when_over(fut, *, args=args, kwargs=kwargs):\n self._task.remove_done_callback(restart_when_over)\n self.start(*args, **kwargs)\n\n if self._can_be_cancelled():\n self._task.add_done_callback(restart_when_over)\n self._task.cancel()\n\n def add_exception_type(self, exc):\n r\"\"\"Adds an exception type to be handled during the reconnect logic.\n\n By default the exception types handled are those handled by\n :meth:`discord.Client.connect`\\, which includes a lot of internet disconnection\n errors.\n\n This function is useful if you're interacting with a 3rd party library that\n raises its own set of exceptions.\n\n Parameters\n ------------\n exc: Type[:class:`BaseException`]\n The exception class to handle.\n\n Raises\n --------\n TypeError\n The exception passed is either not a class or not inherited from :class:`BaseException`.\n \"\"\"\n\n if not inspect.isclass(exc):\n raise TypeError('{0!r} must be a class.'.format(exc))\n if not issubclass(exc, BaseException):\n raise TypeError('{0!r} must inherit from BaseException.'.format(exc))\n\n self._valid_exception = (*self._valid_exception, exc)\n\n def clear_exception_types(self):\n \"\"\"Removes all exception types that are handled.\n\n .. 
note::\n\n This operation obviously cannot be undone!\n \"\"\"\n self._valid_exception = tuple()\n\n def remove_exception_type(self, exc):\n \"\"\"Removes an exception type from being handled during the reconnect logic.\n\n Parameters\n ------------\n exc: Type[:class:`BaseException`]\n The exception class to handle.\n\n Returns\n ---------\n :class:`bool`\n Whether it was successfully removed.\n \"\"\"\n old_length = len(self._valid_exception)\n self._valid_exception = tuple(x for x in self._valid_exception if x is not exc)\n return len(self._valid_exception) != old_length\n\n def get_task(self):\n \"\"\"Optional[:class:`asyncio.Task`]: Fetches the internal task or ``None`` if there isn't one running.\"\"\"\n return self._task\n\n def is_being_cancelled(self):\n \"\"\":class:`bool`: Whether the task is being cancelled.\"\"\"\n return self._is_being_cancelled\n\n def failed(self):\n \"\"\":class:`bool`: Whether the internal task has failed.\n\n .. versionadded:: 1.2.0\n \"\"\"\n return self._has_failed\n\n def before_loop(self, coro):\n \"\"\"A decorator that registers a coroutine to be called before the loop starts running.\n\n This is useful if you want to wait for some bot state before the loop starts,\n such as :meth:`discord.Client.wait_until_ready`.\n\n The coroutine must take no arguments (except ``self`` in a class context).\n\n Parameters\n ------------\n coro: :ref:`coroutine <coroutine>`\n The coroutine to register before the loop runs.\n\n Raises\n -------\n TypeError\n The function was not a coroutine.\n \"\"\"\n\n if not inspect.iscoroutinefunction(coro):\n raise TypeError('Expected coroutine function, received {0.__name__!r}.'.format(type(coro)))\n\n self._before_loop = coro\n return coro\n\n def after_loop(self, coro):\n \"\"\"A decorator that register a coroutine to be called after the loop finished running.\n\n The coroutine must take no arguments (except ``self`` in a class context).\n\n .. note::\n\n This coroutine is called even during cancellation. 
If it is desirable\n to tell apart whether something was cancelled or not, check to see\n whether :meth:`is_being_cancelled` is ``True`` or not.\n\n Parameters\n ------------\n coro: :ref:`coroutine <coroutine>`\n The coroutine to register after the loop finishes.\n\n Raises\n -------\n TypeError\n The function was not a coroutine.\n \"\"\"\n\n if not inspect.iscoroutinefunction(coro):\n raise TypeError('Expected coroutine function, received {0.__name__!r}.'.format(type(coro)))\n\n self._after_loop = coro\n return coro\n\ndef loop(*, seconds=0, minutes=0, hours=0, count=None, reconnect=True, loop=None):\n \"\"\"A decorator that schedules a task in the background for you with\n optional reconnect logic.\n\n Parameters\n ------------\n seconds: :class:`float`\n The number of seconds between every iteration.\n minutes: :class:`float`\n The number of minutes between every iteration.\n hours: :class:`float`\n The number of hours between every iteration.\n count: Optional[:class:`int`]\n The number of loops to do, ``None`` if it should be an\n infinite loop.\n reconnect: :class:`bool`\n Whether to handle errors and restart the task\n using an exponential back-off algorithm similar to the\n one used in :meth:`discord.Client.connect`.\n loop: :class:`asyncio.AbstractEventLoop`\n The loop to use to register the task, if not given\n defaults to :func:`asyncio.get_event_loop`.\n\n Raises\n --------\n ValueError\n An invalid value was given.\n TypeError\n The function was not a coroutine.\n\n Returns\n ---------\n :class:`Loop`\n The loop helper that handles the background task.\n \"\"\"\n def decorator(func):\n return Loop(func, seconds=seconds, minutes=minutes, hours=hours,\n count=count, reconnect=reconnect, loop=loop)\n return decorator\n", "path": "discord/ext/tasks/__init__.py"}]}
| 4,092 | 689 |
gh_patches_debug_123
|
rasdani/github-patches
|
git_diff
|
ResonantGeoData__ResonantGeoData-455
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve VTK.js 3D Viewer
After #406 is merged, we should improve the 3D viewer. Basically, use [this example](https://kitware.github.io/vtk-js/examples/GeometryViewer.html)
Things we should have:
- [x] drop-down menu to change the scalar array
- [x] Scalar bar
- [x] Representation style
- [x] Better background color choice (likely black)
- [x] Point size slider
- [x] Support RGB colors
</issue>
<code>
[start of example_project/rgd_example/settings.py]
1 from rgd_testing_utils.settings import * # noqa
2
3 INSTALLED_APPS += [ # noqa
4 'rgd_3d',
5 'rgd_fmv',
6 'rgd_geometry',
7 'rgd_imagery',
8 # Swagger
9 'drf_yasg',
10 'django_extensions',
11 ]
12
13 ROOT_URLCONF = 'rgd_example.urls'
14 WSGI_APPLICATION = 'rgd_example.wsgi.application'
15
16
17 # Swagger
18 REFETCH_SCHEMA_WITH_AUTH = True
19 REFETCH_SCHEMA_ON_LOGOUT = True
20 OPERATIONS_SORTER = 'alpha'
21 DEEP_LINKING = True
22
[end of example_project/rgd_example/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/example_project/rgd_example/settings.py b/example_project/rgd_example/settings.py
--- a/example_project/rgd_example/settings.py
+++ b/example_project/rgd_example/settings.py
@@ -19,3 +19,5 @@
REFETCH_SCHEMA_ON_LOGOUT = True
OPERATIONS_SORTER = 'alpha'
DEEP_LINKING = True
+
+STATIC_URL = '/static/'
|
{"golden_diff": "diff --git a/example_project/rgd_example/settings.py b/example_project/rgd_example/settings.py\n--- a/example_project/rgd_example/settings.py\n+++ b/example_project/rgd_example/settings.py\n@@ -19,3 +19,5 @@\n REFETCH_SCHEMA_ON_LOGOUT = True\n OPERATIONS_SORTER = 'alpha'\n DEEP_LINKING = True\n+\n+STATIC_URL = '/static/'\n", "issue": "Improve VTK.js 3D Viewer\nAfter #406 is merged, we should improve the 3D viewer. Basically, use [this example](https://kitware.github.io/vtk-js/examples/GeometryViewer.html)\r\n\r\nThings we should have:\r\n\r\n- [x] drop-down menu to change the scalar array\r\n- [x] Scalar bar\r\n- [x] Representation style\r\n- [x] Better background color choice (likely black)\r\n- [x] Point size slider\r\n- [x] Support RGB colors\n", "before_files": [{"content": "from rgd_testing_utils.settings import * # noqa\n\nINSTALLED_APPS += [ # noqa\n 'rgd_3d',\n 'rgd_fmv',\n 'rgd_geometry',\n 'rgd_imagery',\n # Swagger\n 'drf_yasg',\n 'django_extensions',\n]\n\nROOT_URLCONF = 'rgd_example.urls'\nWSGI_APPLICATION = 'rgd_example.wsgi.application'\n\n\n# Swagger\nREFETCH_SCHEMA_WITH_AUTH = True\nREFETCH_SCHEMA_ON_LOGOUT = True\nOPERATIONS_SORTER = 'alpha'\nDEEP_LINKING = True\n", "path": "example_project/rgd_example/settings.py"}]}
| 814 | 88 |
gh_patches_debug_6251
|
rasdani/github-patches
|
git_diff
|
searxng__searxng-83
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: engine "archive is" reports HTTP 404 / Not found
**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**
1970d28a
**Technical report**
Error
* Error: httpx.HTTPStatusError
* Percentage: 100
* Parameters: `('404', 'Not Found', 'archive.is')`
* File name: `searx/search/processors/online.py:99`
* Function: `_send_http_request`
* Code: `response = req(params['url'], **request_args)`
</issue>
<code>
[start of searx/engines/xpath.py]
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2
3 from lxml import html
4 from urllib.parse import urlencode
5 from searx.utils import extract_text, extract_url, eval_xpath, eval_xpath_list
6
7 search_url = None
8 url_xpath = None
9 content_xpath = None
10 title_xpath = None
11 thumbnail_xpath = False
12 paging = False
13 suggestion_xpath = ''
14 results_xpath = ''
15 cached_xpath = ''
16 cached_url = ''
17
18 # parameters for engines with paging support
19 #
20 # number of results on each page
21 # (only needed if the site requires not a page number, but an offset)
22 page_size = 1
23 # number of the first page (usually 0 or 1)
24 first_page_num = 1
25
26
27 def request(query, params):
28 query = urlencode({'q': query})[2:]
29
30 fp = {'query': query}
31 if paging and search_url.find('{pageno}') >= 0:
32 fp['pageno'] = (params['pageno'] - 1) * page_size + first_page_num
33
34 params['url'] = search_url.format(**fp)
35 params['query'] = query
36
37 return params
38
39
40 def response(resp):
41 results = []
42 dom = html.fromstring(resp.text)
43 is_onion = True if 'onions' in categories else False # pylint: disable=undefined-variable
44
45 if results_xpath:
46 for result in eval_xpath_list(dom, results_xpath):
47 url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url)
48 title = extract_text(eval_xpath_list(result, title_xpath, min_len=1))
49 content = extract_text(eval_xpath_list(result, content_xpath, min_len=1))
50 tmp_result = {'url': url, 'title': title, 'content': content}
51
52 # add thumbnail if available
53 if thumbnail_xpath:
54 thumbnail_xpath_result = eval_xpath_list(result, thumbnail_xpath)
55 if len(thumbnail_xpath_result) > 0:
56 tmp_result['img_src'] = extract_url(thumbnail_xpath_result, search_url)
57
58 # add alternative cached url if available
59 if cached_xpath:
60 tmp_result['cached_url'] = cached_url\
61 + extract_text(eval_xpath_list(result, cached_xpath, min_len=1))
62
63 if is_onion:
64 tmp_result['is_onion'] = True
65
66 results.append(tmp_result)
67 else:
68 if cached_xpath:
69 for url, title, content, cached in zip(
70 (extract_url(x, search_url) for
71 x in eval_xpath_list(dom, url_xpath)),
72 map(extract_text, eval_xpath_list(dom, title_xpath)),
73 map(extract_text, eval_xpath_list(dom, content_xpath)),
74 map(extract_text, eval_xpath_list(dom, cached_xpath))
75 ):
76 results.append({'url': url, 'title': title, 'content': content,
77 'cached_url': cached_url + cached, 'is_onion': is_onion})
78 else:
79 for url, title, content in zip(
80 (extract_url(x, search_url) for
81 x in eval_xpath_list(dom, url_xpath)),
82 map(extract_text, eval_xpath_list(dom, title_xpath)),
83 map(extract_text, eval_xpath_list(dom, content_xpath))
84 ):
85 results.append({'url': url, 'title': title, 'content': content, 'is_onion': is_onion})
86
87 if not suggestion_xpath:
88 return results
89 for suggestion in eval_xpath(dom, suggestion_xpath):
90 results.append({'suggestion': extract_text(suggestion)})
91 return results
92
[end of searx/engines/xpath.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py
--- a/searx/engines/xpath.py
+++ b/searx/engines/xpath.py
@@ -14,6 +14,7 @@
results_xpath = ''
cached_xpath = ''
cached_url = ''
+soft_max_redirects = 0
# parameters for engines with paging support
#
@@ -33,6 +34,7 @@
params['url'] = search_url.format(**fp)
params['query'] = query
+ params['soft_max_redirects'] = soft_max_redirects
return params
|
{"golden_diff": "diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py\n--- a/searx/engines/xpath.py\n+++ b/searx/engines/xpath.py\n@@ -14,6 +14,7 @@\n results_xpath = ''\n cached_xpath = ''\n cached_url = ''\n+soft_max_redirects = 0\n \n # parameters for engines with paging support\n #\n@@ -33,6 +34,7 @@\n \n params['url'] = search_url.format(**fp)\n params['query'] = query\n+ params['soft_max_redirects'] = soft_max_redirects\n \n return params\n", "issue": "Bug: engine \"archive is\" reports HTTP 404 / Not found\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\n\r\n1970d28a\r\n\r\n**Technical report**\r\n\r\nError\r\n * Error: httpx.HTTPStatusError\r\n * Percentage: 100\r\n * Parameters: `('404', 'Not Found', 'archive.is')`\r\n * File name: `searx/search/processors/online.py:99`\r\n * Function: `_send_http_request`\r\n * Code: `response = req(params['url'], **request_args)`\r\n\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\nfrom lxml import html\nfrom urllib.parse import urlencode\nfrom searx.utils import extract_text, extract_url, eval_xpath, eval_xpath_list\n\nsearch_url = None\nurl_xpath = None\ncontent_xpath = None\ntitle_xpath = None\nthumbnail_xpath = False\npaging = False\nsuggestion_xpath = ''\nresults_xpath = ''\ncached_xpath = ''\ncached_url = ''\n\n# parameters for engines with paging support\n#\n# number of results on each page\n# (only needed if the site requires not a page number, but an offset)\npage_size = 1\n# number of the first page (usually 0 or 1)\nfirst_page_num = 1\n\n\ndef request(query, params):\n query = urlencode({'q': query})[2:]\n\n fp = {'query': query}\n if paging and search_url.find('{pageno}') >= 0:\n fp['pageno'] = (params['pageno'] - 1) * page_size + first_page_num\n\n params['url'] = search_url.format(**fp)\n params['query'] = query\n\n return params\n\n\ndef response(resp):\n results = []\n dom = html.fromstring(resp.text)\n is_onion = True if 'onions' in categories else False # pylint: disable=undefined-variable\n\n if results_xpath:\n for result in eval_xpath_list(dom, results_xpath):\n url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url)\n title = extract_text(eval_xpath_list(result, title_xpath, min_len=1))\n content = extract_text(eval_xpath_list(result, content_xpath, min_len=1))\n tmp_result = {'url': url, 'title': title, 'content': content}\n\n # add thumbnail if available\n if thumbnail_xpath:\n thumbnail_xpath_result = eval_xpath_list(result, thumbnail_xpath)\n if len(thumbnail_xpath_result) > 0:\n tmp_result['img_src'] = extract_url(thumbnail_xpath_result, search_url)\n\n # add alternative cached url if available\n if cached_xpath:\n tmp_result['cached_url'] = cached_url\\\n + extract_text(eval_xpath_list(result, cached_xpath, min_len=1))\n\n if is_onion:\n tmp_result['is_onion'] = True\n\n results.append(tmp_result)\n else:\n if cached_xpath:\n for url, title, content, cached in zip(\n (extract_url(x, search_url) for\n x in eval_xpath_list(dom, url_xpath)),\n map(extract_text, eval_xpath_list(dom, title_xpath)),\n map(extract_text, eval_xpath_list(dom, content_xpath)),\n map(extract_text, eval_xpath_list(dom, cached_xpath))\n ):\n results.append({'url': url, 'title': title, 'content': content,\n 'cached_url': cached_url + cached, 'is_onion': is_onion})\n else:\n for url, title, content in zip(\n (extract_url(x, search_url) for\n x in eval_xpath_list(dom, url_xpath)),\n map(extract_text, eval_xpath_list(dom, 
title_xpath)),\n map(extract_text, eval_xpath_list(dom, content_xpath))\n ):\n results.append({'url': url, 'title': title, 'content': content, 'is_onion': is_onion})\n\n if not suggestion_xpath:\n return results\n for suggestion in eval_xpath(dom, suggestion_xpath):\n results.append({'suggestion': extract_text(suggestion)})\n return results\n", "path": "searx/engines/xpath.py"}]}
| 1,629 | 142 |
gh_patches_debug_40813
|
rasdani/github-patches
|
git_diff
|
sublimelsp__LSP-1982
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
side_by_side should preview files in a side view
**Describe the bug**
When using `side_by_side: true` option for commands that support it, if the command opens a quick panel with multiple items, the items open in the main view on changing selection and only on pressing Enter to select an item it opens in the side view. I suppose that it should also show previews in side view like native ST functionality does (for example `shift+primary+f12`).
**To Reproduce**
Steps to reproduce the behavior:
1. Set up keybinding like:
```
{
"command": "lsp_symbol_type_definition",
"keys": ["f13"],
"args": {"side_by_side": true},
"context": [
{
"key": "lsp.session_with_capability",
"operator": "equal",
"operand": "typeDefinitionProvider"
},
{
"key": "auto_complete_visible",
"operator": "equal",
"operand": false
}
]
},
```
3. Press F13 on some symbol that is referenced from multiple places
**Expected behavior**
Changing selection in quick panel should preview the file in a side by side view.
**Environment (please complete the following information):**
- OS: macOS
- Sublime Text version: 4134
</issue>
<code>
[start of plugin/locationpicker.py]
1 from .core.logging import debug
2 from .core.protocol import DocumentUri, Location, Position
3 from .core.protocol import LocationLink
4 from .core.sessions import Session
5 from .core.typing import Union, List, Optional, Tuple
6 from .core.views import get_uri_and_position_from_location
7 from .core.views import location_to_human_readable
8 from .core.views import to_encoded_filename
9 import functools
10 import sublime
11 import weakref
12
13
14 def open_location_async(session: Session, location: Union[Location, LocationLink], side_by_side: bool) -> None:
15 flags = sublime.ENCODED_POSITION
16 if side_by_side:
17 flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT
18
19 def check_success_async(view: Optional[sublime.View]) -> None:
20 if not view:
21 sublime.error_message("Unable to open URI")
22
23 session.open_location_async(location, flags).then(check_success_async)
24
25
26 def open_basic_file(
27 session: Session,
28 uri: str,
29 position: Position,
30 flags: int = 0,
31 group: Optional[int] = None
32 ) -> None:
33 filename = session.config.map_server_uri_to_client_path(uri)
34 if group is None:
35 group = session.window.active_group()
36 session.window.open_file(to_encoded_filename(filename, position), flags=flags, group=group)
37
38
39 class LocationPicker:
40
41 def __init__(
42 self,
43 view: sublime.View,
44 session: Session,
45 locations: Union[List[Location], List[LocationLink]],
46 side_by_side: bool
47 ) -> None:
48 self._view = view
49 window = view.window()
50 if not window:
51 raise ValueError("missing window")
52 self._window = window
53 self._weaksession = weakref.ref(session)
54 self._side_by_side = side_by_side
55 self._items = locations
56 manager = session.manager()
57 base_dir = manager.get_project_path(view.file_name() or "") if manager else None
58 self._window.show_quick_panel(
59 items=[location_to_human_readable(session.config, base_dir, location) for location in locations],
60 on_select=self._select_entry,
61 on_highlight=self._highlight_entry,
62 flags=sublime.KEEP_OPEN_ON_FOCUS_LOST
63 )
64
65 def _unpack(self, index: int) -> Tuple[Optional[Session], Union[Location, LocationLink], DocumentUri, Position]:
66 location = self._items[index]
67 uri, position = get_uri_and_position_from_location(location)
68 return self._weaksession(), location, uri, position
69
70 def _select_entry(self, index: int) -> None:
71 if index >= 0 and self._view.is_valid():
72 session, location, uri, position = self._unpack(index)
73 if not session:
74 return
75 # Note: this has to run on the main thread (and not via open_location_async)
76 # otherwise the bevior feels weird. It's the only reason why open_basic_file exists.
77 if uri.startswith("file:"):
78 flags = sublime.ENCODED_POSITION
79 if self._side_by_side:
80 flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT
81 open_basic_file(session, uri, position, flags)
82 else:
83 sublime.set_timeout_async(functools.partial(open_location_async, session, location, self._side_by_side))
84 else:
85 self._window.focus_view(self._view)
86
87 def _highlight_entry(self, index: int) -> None:
88 session, _, uri, position = self._unpack(index)
89 if not session:
90 return
91 if uri.startswith("file:"):
92 open_basic_file(session, uri, position, sublime.TRANSIENT | sublime.ENCODED_POSITION)
93 else:
94 # TODO: Preview non-file uris?
95 debug("no preview for", uri)
96
[end of plugin/locationpicker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/plugin/locationpicker.py b/plugin/locationpicker.py
--- a/plugin/locationpicker.py
+++ b/plugin/locationpicker.py
@@ -29,11 +29,11 @@
position: Position,
flags: int = 0,
group: Optional[int] = None
-) -> None:
+) -> sublime.View:
filename = session.config.map_server_uri_to_client_path(uri)
if group is None:
group = session.window.active_group()
- session.window.open_file(to_encoded_filename(filename, position), flags=flags, group=group)
+ return session.window.open_file(to_encoded_filename(filename, position), flags=flags, group=group)
class LocationPicker:
@@ -53,6 +53,7 @@
self._weaksession = weakref.ref(session)
self._side_by_side = side_by_side
self._items = locations
+ self._highlighted_view = None # type: Optional[sublime.View]
manager = session.manager()
base_dir = manager.get_project_path(view.file_name() or "") if manager else None
self._window.show_quick_panel(
@@ -76,20 +77,35 @@
# otherwise the bevior feels weird. It's the only reason why open_basic_file exists.
if uri.startswith("file:"):
flags = sublime.ENCODED_POSITION
- if self._side_by_side:
- flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT
- open_basic_file(session, uri, position, flags)
+ if not self._side_by_side:
+ open_basic_file(session, uri, position, flags)
else:
sublime.set_timeout_async(functools.partial(open_location_async, session, location, self._side_by_side))
else:
self._window.focus_view(self._view)
+ # When in side-by-side mode close the current highlighted
+ # sheet upon canceling if the sheet is semi-transient
+ if self._side_by_side and self._highlighted_view:
+ sheet = self._highlighted_view.sheet()
+ if sheet and sheet.is_semi_transient():
+ self._highlighted_view.close()
def _highlight_entry(self, index: int) -> None:
session, _, uri, position = self._unpack(index)
if not session:
return
if uri.startswith("file:"):
- open_basic_file(session, uri, position, sublime.TRANSIENT | sublime.ENCODED_POSITION)
+ flags = sublime.ENCODED_POSITION | sublime.FORCE_GROUP
+ if self._side_by_side:
+ if self._highlighted_view and self._highlighted_view.is_valid():
+ # Replacing the MRU is done relative to the current highlighted sheet
+ self._window.focus_view(self._highlighted_view)
+ flags |= sublime.REPLACE_MRU | sublime.SEMI_TRANSIENT
+ else:
+ flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT
+ else:
+ flags |= sublime.TRANSIENT
+ self._highlighted_view = open_basic_file(session, uri, position, flags, self._window.active_group())
else:
# TODO: Preview non-file uris?
debug("no preview for", uri)
|
{"golden_diff": "diff --git a/plugin/locationpicker.py b/plugin/locationpicker.py\n--- a/plugin/locationpicker.py\n+++ b/plugin/locationpicker.py\n@@ -29,11 +29,11 @@\n position: Position,\n flags: int = 0,\n group: Optional[int] = None\n-) -> None:\n+) -> sublime.View:\n filename = session.config.map_server_uri_to_client_path(uri)\n if group is None:\n group = session.window.active_group()\n- session.window.open_file(to_encoded_filename(filename, position), flags=flags, group=group)\n+ return session.window.open_file(to_encoded_filename(filename, position), flags=flags, group=group)\n \n \n class LocationPicker:\n@@ -53,6 +53,7 @@\n self._weaksession = weakref.ref(session)\n self._side_by_side = side_by_side\n self._items = locations\n+ self._highlighted_view = None # type: Optional[sublime.View]\n manager = session.manager()\n base_dir = manager.get_project_path(view.file_name() or \"\") if manager else None\n self._window.show_quick_panel(\n@@ -76,20 +77,35 @@\n # otherwise the bevior feels weird. It's the only reason why open_basic_file exists.\n if uri.startswith(\"file:\"):\n flags = sublime.ENCODED_POSITION\n- if self._side_by_side:\n- flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT\n- open_basic_file(session, uri, position, flags)\n+ if not self._side_by_side:\n+ open_basic_file(session, uri, position, flags)\n else:\n sublime.set_timeout_async(functools.partial(open_location_async, session, location, self._side_by_side))\n else:\n self._window.focus_view(self._view)\n+ # When in side-by-side mode close the current highlighted\n+ # sheet upon canceling if the sheet is semi-transient\n+ if self._side_by_side and self._highlighted_view:\n+ sheet = self._highlighted_view.sheet()\n+ if sheet and sheet.is_semi_transient():\n+ self._highlighted_view.close()\n \n def _highlight_entry(self, index: int) -> None:\n session, _, uri, position = self._unpack(index)\n if not session:\n return\n if uri.startswith(\"file:\"):\n- open_basic_file(session, uri, position, sublime.TRANSIENT | sublime.ENCODED_POSITION)\n+ flags = sublime.ENCODED_POSITION | sublime.FORCE_GROUP\n+ if self._side_by_side:\n+ if self._highlighted_view and self._highlighted_view.is_valid():\n+ # Replacing the MRU is done relative to the current highlighted sheet\n+ self._window.focus_view(self._highlighted_view)\n+ flags |= sublime.REPLACE_MRU | sublime.SEMI_TRANSIENT\n+ else:\n+ flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT\n+ else:\n+ flags |= sublime.TRANSIENT\n+ self._highlighted_view = open_basic_file(session, uri, position, flags, self._window.active_group())\n else:\n # TODO: Preview non-file uris?\n debug(\"no preview for\", uri)\n", "issue": "side_by_side should preview files in a side view\n**Describe the bug**\r\nWhen using `side_by_side: true` option for commands that support it, if the command opens a quick panel with multiple items, the items open in the main view on changing selection and only on pressing Enter to select an item it opens in the side view. I suppose that it should also show previews in side view like native ST functionality does (for example `shift+primary+f12`).\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. 
Set up keybinding like:\r\n```\r\n {\r\n \"command\": \"lsp_symbol_type_definition\",\r\n \"keys\": [\"f13\"],\r\n \"args\": {\"side_by_side\": true},\r\n \"context\": [\r\n {\r\n \"key\": \"lsp.session_with_capability\",\r\n \"operator\": \"equal\",\r\n \"operand\": \"typeDefinitionProvider\"\r\n },\r\n {\r\n \"key\": \"auto_complete_visible\",\r\n \"operator\": \"equal\",\r\n \"operand\": false\r\n }\r\n ]\r\n },\r\n```\r\n3. Press F13 on some symbol that is referenced from multiple places\r\n\r\n**Expected behavior**\r\nChanging selection in quick panel should preview the file in a side by side view.\r\n\r\n**Environment (please complete the following information):**\r\n- OS: macOS\r\n- Sublime Text version: 4134\r\n\n", "before_files": [{"content": "from .core.logging import debug\nfrom .core.protocol import DocumentUri, Location, Position\nfrom .core.protocol import LocationLink\nfrom .core.sessions import Session\nfrom .core.typing import Union, List, Optional, Tuple\nfrom .core.views import get_uri_and_position_from_location\nfrom .core.views import location_to_human_readable\nfrom .core.views import to_encoded_filename\nimport functools\nimport sublime\nimport weakref\n\n\ndef open_location_async(session: Session, location: Union[Location, LocationLink], side_by_side: bool) -> None:\n flags = sublime.ENCODED_POSITION\n if side_by_side:\n flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT\n\n def check_success_async(view: Optional[sublime.View]) -> None:\n if not view:\n sublime.error_message(\"Unable to open URI\")\n\n session.open_location_async(location, flags).then(check_success_async)\n\n\ndef open_basic_file(\n session: Session,\n uri: str,\n position: Position,\n flags: int = 0,\n group: Optional[int] = None\n) -> None:\n filename = session.config.map_server_uri_to_client_path(uri)\n if group is None:\n group = session.window.active_group()\n session.window.open_file(to_encoded_filename(filename, position), flags=flags, group=group)\n\n\nclass LocationPicker:\n\n def __init__(\n self,\n view: sublime.View,\n session: Session,\n locations: Union[List[Location], List[LocationLink]],\n side_by_side: bool\n ) -> None:\n self._view = view\n window = view.window()\n if not window:\n raise ValueError(\"missing window\")\n self._window = window\n self._weaksession = weakref.ref(session)\n self._side_by_side = side_by_side\n self._items = locations\n manager = session.manager()\n base_dir = manager.get_project_path(view.file_name() or \"\") if manager else None\n self._window.show_quick_panel(\n items=[location_to_human_readable(session.config, base_dir, location) for location in locations],\n on_select=self._select_entry,\n on_highlight=self._highlight_entry,\n flags=sublime.KEEP_OPEN_ON_FOCUS_LOST\n )\n\n def _unpack(self, index: int) -> Tuple[Optional[Session], Union[Location, LocationLink], DocumentUri, Position]:\n location = self._items[index]\n uri, position = get_uri_and_position_from_location(location)\n return self._weaksession(), location, uri, position\n\n def _select_entry(self, index: int) -> None:\n if index >= 0 and self._view.is_valid():\n session, location, uri, position = self._unpack(index)\n if not session:\n return\n # Note: this has to run on the main thread (and not via open_location_async)\n # otherwise the bevior feels weird. 
It's the only reason why open_basic_file exists.\n if uri.startswith(\"file:\"):\n flags = sublime.ENCODED_POSITION\n if self._side_by_side:\n flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT\n open_basic_file(session, uri, position, flags)\n else:\n sublime.set_timeout_async(functools.partial(open_location_async, session, location, self._side_by_side))\n else:\n self._window.focus_view(self._view)\n\n def _highlight_entry(self, index: int) -> None:\n session, _, uri, position = self._unpack(index)\n if not session:\n return\n if uri.startswith(\"file:\"):\n open_basic_file(session, uri, position, sublime.TRANSIENT | sublime.ENCODED_POSITION)\n else:\n # TODO: Preview non-file uris?\n debug(\"no preview for\", uri)\n", "path": "plugin/locationpicker.py"}]}
| 1,809 | 701 |
gh_patches_debug_18206
|
rasdani/github-patches
|
git_diff
|
mindsdb__lightwood-698
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make `column_importances` optional
If the column importance module gets automatically disabled (see #681) an issue may arise where the information required for a `ModelAnalysis` object is not available. Example stacktrace:
```python
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-10-b3f165881113> in <module>
16
17 # Analyze the ensembles
---> 18 predictor.analyze_ensemble(enc_data)
/tmp/b380bd15a0ec89f57a82f719f514e67f0cae00fe7d0085d816353697296992059.py in analyze_ensemble(self, enc_data)
1439 # --------------- #
1440 log.info("Analyzing the ensemble of mixers")
-> 1441 self.model_analysis, self.runtime_analyzer = model_analyzer(
1442 data=encoded_test_data,
1443 train_data=encoded_train_data,
~/Documents/lightwood/lightwood/analysis/analyze.py in model_analyzer(predictor, data, train_data, stats_info, target, ts_cfg, dtype_dict, accuracy_functions, analysis_blocks)
91 test_sample_size=len(encoded_val_data),
92 confusion_matrix=runtime_analyzer['cm'],
---> 93 column_importances=runtime_analyzer['column_importances'],
94 histograms=stats_info.histograms,
95 dtypes=dtype_dict
KeyError: 'column_importances'
```
</issue>
<code>
[start of lightwood/analysis/analyze.py]
1 from typing import Dict, List, Tuple, Optional
2
3 from lightwood.helpers.log import log
4 from lightwood.api import dtype
5 from lightwood.ensemble import BaseEnsemble
6 from lightwood.analysis.base import BaseAnalysisBlock
7 from lightwood.data.encoded_ds import EncodedDs
8 from lightwood.encoder.text.pretrained import PretrainedLangEncoder
9 from lightwood.api.types import ModelAnalysis, StatisticalAnalysis, TimeseriesSettings, PredictionArguments
10
11
12 def model_analyzer(
13 predictor: BaseEnsemble,
14 data: EncodedDs,
15 train_data: EncodedDs,
16 stats_info: StatisticalAnalysis,
17 target: str,
18 ts_cfg: TimeseriesSettings,
19 dtype_dict: Dict[str, str],
20 accuracy_functions,
21 analysis_blocks: Optional[List[BaseAnalysisBlock]] = []
22 ) -> Tuple[ModelAnalysis, Dict[str, object]]:
23 """
24 Analyses model on a validation subset to evaluate accuracy, estimate feature importance and generate a
25 calibration model to estimating confidence in future predictions.
26
27 Additionally, any user-specified analysis blocks (see class `BaseAnalysisBlock`) are also called here.
28
29 :return:
30 runtime_analyzer: This dictionary object gets populated in a sequential fashion with data generated from
31 any `.analyze()` block call. This dictionary object is stored in the predictor itself, and used when
32 calling the `.explain()` method of all analysis blocks when generating predictions.
33
34 model_analysis: `ModelAnalysis` object that contains core analysis metrics, not necessarily needed when predicting.
35 """
36
37 runtime_analyzer = {}
38 data_type = dtype_dict[target]
39
40 # retrieve encoded data representations
41 encoded_train_data = train_data
42 encoded_val_data = data
43 data = encoded_val_data.data_frame
44 input_cols = list([col for col in data.columns if col != target])
45
46 # predictive task
47 is_numerical = data_type in (dtype.integer, dtype.float, dtype.array, dtype.tsarray, dtype.quantity)
48 is_classification = data_type in (dtype.categorical, dtype.binary)
49 is_multi_ts = ts_cfg.is_timeseries and ts_cfg.nr_predictions > 1
50 has_pretrained_text_enc = any([isinstance(enc, PretrainedLangEncoder)
51 for enc in encoded_train_data.encoders.values()])
52
53 # raw predictions for validation dataset
54 args = {} if not is_classification else {"predict_proba": True}
55 normal_predictions = predictor(encoded_val_data, args=PredictionArguments.from_dict(args))
56 normal_predictions = normal_predictions.set_index(data.index)
57
58 # ------------------------- #
59 # Run analysis blocks, both core and user-defined
60 # ------------------------- #
61 kwargs = {
62 'predictor': predictor,
63 'target': target,
64 'input_cols': input_cols,
65 'dtype_dict': dtype_dict,
66 'normal_predictions': normal_predictions,
67 'data': data,
68 'train_data': train_data,
69 'encoded_val_data': encoded_val_data,
70 'is_classification': is_classification,
71 'is_numerical': is_numerical,
72 'is_multi_ts': is_multi_ts,
73 'stats_info': stats_info,
74 'ts_cfg': ts_cfg,
75 'accuracy_functions': accuracy_functions,
76 'has_pretrained_text_enc': has_pretrained_text_enc
77 }
78
79 for block in analysis_blocks:
80 log.info("The block %s is now running its analyze() method", block.__class__.__name__)
81 runtime_analyzer = block.analyze(runtime_analyzer, **kwargs)
82
83 # ------------------------- #
84 # Populate ModelAnalysis object
85 # ------------------------- #
86 model_analysis = ModelAnalysis(
87 accuracies=runtime_analyzer['score_dict'],
88 accuracy_histogram=runtime_analyzer['acc_histogram'],
89 accuracy_samples=runtime_analyzer['acc_samples'],
90 train_sample_size=len(encoded_train_data),
91 test_sample_size=len(encoded_val_data),
92 confusion_matrix=runtime_analyzer['cm'],
93 column_importances=runtime_analyzer['column_importances'],
94 histograms=stats_info.histograms,
95 dtypes=dtype_dict
96 )
97
98 return model_analysis, runtime_analyzer
99
[end of lightwood/analysis/analyze.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lightwood/analysis/analyze.py b/lightwood/analysis/analyze.py
--- a/lightwood/analysis/analyze.py
+++ b/lightwood/analysis/analyze.py
@@ -84,13 +84,13 @@
# Populate ModelAnalysis object
# ------------------------- #
model_analysis = ModelAnalysis(
- accuracies=runtime_analyzer['score_dict'],
- accuracy_histogram=runtime_analyzer['acc_histogram'],
- accuracy_samples=runtime_analyzer['acc_samples'],
+ accuracies=runtime_analyzer.get('score_dict', {}),
+ accuracy_histogram=runtime_analyzer.get('acc_histogram', {}),
+ accuracy_samples=runtime_analyzer.get('acc_samples', {}),
train_sample_size=len(encoded_train_data),
test_sample_size=len(encoded_val_data),
confusion_matrix=runtime_analyzer['cm'],
- column_importances=runtime_analyzer['column_importances'],
+ column_importances=runtime_analyzer.get('column_importances', {}),
histograms=stats_info.histograms,
dtypes=dtype_dict
)
|
{"golden_diff": "diff --git a/lightwood/analysis/analyze.py b/lightwood/analysis/analyze.py\n--- a/lightwood/analysis/analyze.py\n+++ b/lightwood/analysis/analyze.py\n@@ -84,13 +84,13 @@\n # Populate ModelAnalysis object\n # ------------------------- #\n model_analysis = ModelAnalysis(\n- accuracies=runtime_analyzer['score_dict'],\n- accuracy_histogram=runtime_analyzer['acc_histogram'],\n- accuracy_samples=runtime_analyzer['acc_samples'],\n+ accuracies=runtime_analyzer.get('score_dict', {}),\n+ accuracy_histogram=runtime_analyzer.get('acc_histogram', {}),\n+ accuracy_samples=runtime_analyzer.get('acc_samples', {}),\n train_sample_size=len(encoded_train_data),\n test_sample_size=len(encoded_val_data),\n confusion_matrix=runtime_analyzer['cm'],\n- column_importances=runtime_analyzer['column_importances'],\n+ column_importances=runtime_analyzer.get('column_importances', {}),\n histograms=stats_info.histograms,\n dtypes=dtype_dict\n )\n", "issue": "Make `column_importances` optional\nIf the column importance module gets automatically disabled (see #681) an issue may arise where the information required for a `ModelAnalysis` object is not available. Example stacktrace:\r\n\r\n```python\r\n---------------------------------------------------------------------------\r\nKeyError Traceback (most recent call last)\r\n<ipython-input-10-b3f165881113> in <module>\r\n 16 \r\n 17 # Analyze the ensembles\r\n---> 18 predictor.analyze_ensemble(enc_data)\r\n\r\n/tmp/b380bd15a0ec89f57a82f719f514e67f0cae00fe7d0085d816353697296992059.py in analyze_ensemble(self, enc_data)\r\n 1439 # --------------- #\r\n 1440 log.info(\"Analyzing the ensemble of mixers\")\r\n-> 1441 self.model_analysis, self.runtime_analyzer = model_analyzer(\r\n 1442 data=encoded_test_data,\r\n 1443 train_data=encoded_train_data,\r\n\r\n~/Documents/lightwood/lightwood/analysis/analyze.py in model_analyzer(predictor, data, train_data, stats_info, target, ts_cfg, dtype_dict, accuracy_functions, analysis_blocks)\r\n 91 test_sample_size=len(encoded_val_data),\r\n 92 confusion_matrix=runtime_analyzer['cm'],\r\n---> 93 column_importances=runtime_analyzer['column_importances'],\r\n 94 histograms=stats_info.histograms,\r\n 95 dtypes=dtype_dict\r\n\r\nKeyError: 'column_importances'\r\n```\r\n\n", "before_files": [{"content": "from typing import Dict, List, Tuple, Optional\n\nfrom lightwood.helpers.log import log\nfrom lightwood.api import dtype\nfrom lightwood.ensemble import BaseEnsemble\nfrom lightwood.analysis.base import BaseAnalysisBlock\nfrom lightwood.data.encoded_ds import EncodedDs\nfrom lightwood.encoder.text.pretrained import PretrainedLangEncoder\nfrom lightwood.api.types import ModelAnalysis, StatisticalAnalysis, TimeseriesSettings, PredictionArguments\n\n\ndef model_analyzer(\n predictor: BaseEnsemble,\n data: EncodedDs,\n train_data: EncodedDs,\n stats_info: StatisticalAnalysis,\n target: str,\n ts_cfg: TimeseriesSettings,\n dtype_dict: Dict[str, str],\n accuracy_functions,\n analysis_blocks: Optional[List[BaseAnalysisBlock]] = []\n) -> Tuple[ModelAnalysis, Dict[str, object]]:\n \"\"\"\n Analyses model on a validation subset to evaluate accuracy, estimate feature importance and generate a\n calibration model to estimating confidence in future predictions.\n\n Additionally, any user-specified analysis blocks (see class `BaseAnalysisBlock`) are also called here.\n\n :return:\n runtime_analyzer: This dictionary object gets populated in a sequential fashion with data generated from\n any `.analyze()` block call. 
This dictionary object is stored in the predictor itself, and used when\n calling the `.explain()` method of all analysis blocks when generating predictions.\n\n model_analysis: `ModelAnalysis` object that contains core analysis metrics, not necessarily needed when predicting.\n \"\"\"\n\n runtime_analyzer = {}\n data_type = dtype_dict[target]\n\n # retrieve encoded data representations\n encoded_train_data = train_data\n encoded_val_data = data\n data = encoded_val_data.data_frame\n input_cols = list([col for col in data.columns if col != target])\n\n # predictive task\n is_numerical = data_type in (dtype.integer, dtype.float, dtype.array, dtype.tsarray, dtype.quantity)\n is_classification = data_type in (dtype.categorical, dtype.binary)\n is_multi_ts = ts_cfg.is_timeseries and ts_cfg.nr_predictions > 1\n has_pretrained_text_enc = any([isinstance(enc, PretrainedLangEncoder)\n for enc in encoded_train_data.encoders.values()])\n\n # raw predictions for validation dataset\n args = {} if not is_classification else {\"predict_proba\": True}\n normal_predictions = predictor(encoded_val_data, args=PredictionArguments.from_dict(args))\n normal_predictions = normal_predictions.set_index(data.index)\n\n # ------------------------- #\n # Run analysis blocks, both core and user-defined\n # ------------------------- #\n kwargs = {\n 'predictor': predictor,\n 'target': target,\n 'input_cols': input_cols,\n 'dtype_dict': dtype_dict,\n 'normal_predictions': normal_predictions,\n 'data': data,\n 'train_data': train_data,\n 'encoded_val_data': encoded_val_data,\n 'is_classification': is_classification,\n 'is_numerical': is_numerical,\n 'is_multi_ts': is_multi_ts,\n 'stats_info': stats_info,\n 'ts_cfg': ts_cfg,\n 'accuracy_functions': accuracy_functions,\n 'has_pretrained_text_enc': has_pretrained_text_enc\n }\n\n for block in analysis_blocks:\n log.info(\"The block %s is now running its analyze() method\", block.__class__.__name__)\n runtime_analyzer = block.analyze(runtime_analyzer, **kwargs)\n\n # ------------------------- #\n # Populate ModelAnalysis object\n # ------------------------- #\n model_analysis = ModelAnalysis(\n accuracies=runtime_analyzer['score_dict'],\n accuracy_histogram=runtime_analyzer['acc_histogram'],\n accuracy_samples=runtime_analyzer['acc_samples'],\n train_sample_size=len(encoded_train_data),\n test_sample_size=len(encoded_val_data),\n confusion_matrix=runtime_analyzer['cm'],\n column_importances=runtime_analyzer['column_importances'],\n histograms=stats_info.histograms,\n dtypes=dtype_dict\n )\n\n return model_analysis, runtime_analyzer\n", "path": "lightwood/analysis/analyze.py"}]}
| 1,963 | 226 |
gh_patches_debug_14927
|
rasdani/github-patches
|
git_diff
|
googleapis__python-bigquery-30
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Refactor logic in synth.py for excluding noxfile
As explained in a [comment](https://github.com/googleapis/python-bigquery/pull/1#discussion_r375560206), preventing the synthtool for overriding the customized `noxfile.py` can be achieved in a more straightforward way than currently used.
</issue>
<code>
[start of synth.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """This script is used to synthesize generated parts of this library."""
16
17 import os
18
19 import synthtool as s
20 from synthtool import gcp
21
22 gapic = gcp.GAPICGenerator()
23 common = gcp.CommonTemplates()
24 version = 'v2'
25
26 library = gapic.py_library(
27 'bigquery',
28 version,
29 config_path='/google/cloud/bigquery/'
30 'artman_bigquery_v2.yaml',
31 artman_output_name='bigquery-v2',
32 include_protos=True,
33 )
34
35 s.move(
36 [
37 library / "google/cloud/bigquery_v2/gapic/enums.py",
38 library / "google/cloud/bigquery_v2/types.py",
39 library / "google/cloud/bigquery_v2/proto/location*",
40 library / "google/cloud/bigquery_v2/proto/encryption_config*",
41 library / "google/cloud/bigquery_v2/proto/model*",
42 library / "google/cloud/bigquery_v2/proto/standard_sql*",
43 ],
44 )
45
46 # Fix up proto docs that are missing summary line.
47 s.replace(
48 "google/cloud/bigquery_v2/proto/model_pb2.py",
49 '"""Attributes:',
50 '"""Protocol buffer.\n\n Attributes:',
51 )
52 s.replace(
53 "google/cloud/bigquery_v2/proto/encryption_config_pb2.py",
54 '"""Attributes:',
55 '"""Encryption configuration.\n\n Attributes:',
56 )
57
58 # Remove non-ascii characters from docstrings for Python 2.7.
59 # Format quoted strings as plain text.
60 s.replace("google/cloud/bigquery_v2/proto/*.py", "[“”]", '``')
61
62 # ----------------------------------------------------------------------------
63 # Add templated files
64 # ----------------------------------------------------------------------------
65 templated_files = common.py_library(cov_level=100)
66 # we do not want to override the custom noxfile with the generated one
67 os.remove(os.path.join(templated_files, "noxfile.py"))
68 s.move(templated_files)
69
70 s.shell.run(["nox", "-s", "blacken"], hide_output=False)
71
[end of synth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/synth.py b/synth.py
--- a/synth.py
+++ b/synth.py
@@ -14,8 +14,6 @@
"""This script is used to synthesize generated parts of this library."""
-import os
-
import synthtool as s
from synthtool import gcp
@@ -63,8 +61,6 @@
# Add templated files
# ----------------------------------------------------------------------------
templated_files = common.py_library(cov_level=100)
-# we do not want to override the custom noxfile with the generated one
-os.remove(os.path.join(templated_files, "noxfile.py"))
-s.move(templated_files)
+s.move(templated_files, excludes=["noxfile.py"])
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
|
{"golden_diff": "diff --git a/synth.py b/synth.py\n--- a/synth.py\n+++ b/synth.py\n@@ -14,8 +14,6 @@\n \n \"\"\"This script is used to synthesize generated parts of this library.\"\"\"\n \n-import os\n-\n import synthtool as s\n from synthtool import gcp\n \n@@ -63,8 +61,6 @@\n # Add templated files\n # ----------------------------------------------------------------------------\n templated_files = common.py_library(cov_level=100)\n-# we do not want to override the custom noxfile with the generated one\n-os.remove(os.path.join(templated_files, \"noxfile.py\"))\n-s.move(templated_files)\n+s.move(templated_files, excludes=[\"noxfile.py\"])\n \n s.shell.run([\"nox\", \"-s\", \"blacken\"], hide_output=False)\n", "issue": "Refactor logic in synth.py for excluding noxfile\nAs explained in a [comment](https://github.com/googleapis/python-bigquery/pull/1#discussion_r375560206), preventing the synthtool for overriding the customized `noxfile.py` can be achieved in a more straightforward way than currently used.\r\n\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script is used to synthesize generated parts of this library.\"\"\"\n\nimport os\n\nimport synthtool as s\nfrom synthtool import gcp\n\ngapic = gcp.GAPICGenerator()\ncommon = gcp.CommonTemplates()\nversion = 'v2'\n\nlibrary = gapic.py_library(\n 'bigquery',\n version,\n config_path='/google/cloud/bigquery/'\n 'artman_bigquery_v2.yaml',\n artman_output_name='bigquery-v2',\n include_protos=True,\n)\n\ns.move(\n [\n library / \"google/cloud/bigquery_v2/gapic/enums.py\",\n library / \"google/cloud/bigquery_v2/types.py\",\n library / \"google/cloud/bigquery_v2/proto/location*\",\n library / \"google/cloud/bigquery_v2/proto/encryption_config*\",\n library / \"google/cloud/bigquery_v2/proto/model*\",\n library / \"google/cloud/bigquery_v2/proto/standard_sql*\",\n ],\n)\n\n# Fix up proto docs that are missing summary line.\ns.replace(\n \"google/cloud/bigquery_v2/proto/model_pb2.py\",\n '\"\"\"Attributes:',\n '\"\"\"Protocol buffer.\\n\\n Attributes:',\n)\ns.replace(\n \"google/cloud/bigquery_v2/proto/encryption_config_pb2.py\",\n '\"\"\"Attributes:',\n '\"\"\"Encryption configuration.\\n\\n Attributes:',\n)\n\n# Remove non-ascii characters from docstrings for Python 2.7.\n# Format quoted strings as plain text.\ns.replace(\"google/cloud/bigquery_v2/proto/*.py\", \"[\u201c\u201d]\", '``')\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library(cov_level=100)\n# we do not want to override the custom noxfile with the generated one\nos.remove(os.path.join(templated_files, \"noxfile.py\"))\ns.move(templated_files)\n\ns.shell.run([\"nox\", \"-s\", \"blacken\"], hide_output=False)\n", "path": "synth.py"}]}
| 1,283 | 175 |
gh_patches_debug_17991
|
rasdani/github-patches
|
git_diff
|
searx__searx-1464
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Seems that startpage and ixquick enignes are not working anymore on Searx
Hello and thanks for the works.
I tried on my searx and on other instance like searx.me... but It's not working.
How could we fix that?
Thanks.
</issue>
<code>
[start of searx/engines/startpage.py]
1 # Startpage (Web)
2 #
3 # @website https://startpage.com
4 # @provide-api no (nothing found)
5 #
6 # @using-api no
7 # @results HTML
8 # @stable no (HTML can change)
9 # @parse url, title, content
10 #
11 # @todo paging
12
13 from lxml import html
14 from dateutil import parser
15 from datetime import datetime, timedelta
16 import re
17 from searx.engines.xpath import extract_text
18
19 # engine dependent config
20 categories = ['general']
21 # there is a mechanism to block "bot" search
22 # (probably the parameter qid), require
23 # storing of qid's between mulitble search-calls
24
25 # paging = False
26 language_support = True
27
28 # search-url
29 base_url = 'https://startpage.com/'
30 search_url = base_url + 'do/search'
31
32 # specific xpath variables
33 # ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
34 # not ads: div[@class="result"] are the direct childs of div[@id="results"]
35 results_xpath = '//div[@class="result"]'
36 link_xpath = './/h3/a'
37
38
39 # do search-request
40 def request(query, params):
41 offset = (params['pageno'] - 1) * 10
42
43 params['url'] = search_url
44 params['method'] = 'POST'
45 params['data'] = {'query': query,
46 'startat': offset}
47
48 # set language
49 params['data']['with_language'] = ('lang_' + params['language'].split('-')[0])
50
51 return params
52
53
54 # get response from search-request
55 def response(resp):
56 results = []
57
58 dom = html.fromstring(resp.text)
59
60 # parse results
61 for result in dom.xpath(results_xpath):
62 links = result.xpath(link_xpath)
63 if not links:
64 continue
65 link = links[0]
66 url = link.attrib.get('href')
67
68 # block google-ad url's
69 if re.match(r"^http(s|)://(www\.)?google\.[a-z]+/aclk.*$", url):
70 continue
71
72 # block startpage search url's
73 if re.match(r"^http(s|)://(www\.)?startpage\.com/do/search\?.*$", url):
74 continue
75
76 # block ixquick search url's
77 if re.match(r"^http(s|)://(www\.)?ixquick\.com/do/search\?.*$", url):
78 continue
79
80 title = extract_text(link)
81
82 if result.xpath('./p[@class="desc clk"]'):
83 content = extract_text(result.xpath('./p[@class="desc clk"]'))
84 else:
85 content = ''
86
87 published_date = None
88
89 # check if search result starts with something like: "2 Sep 2014 ... "
90 if re.match(r"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
91 date_pos = content.find('...') + 4
92 date_string = content[0:date_pos - 5]
93 published_date = parser.parse(date_string, dayfirst=True)
94
95 # fix content string
96 content = content[date_pos:]
97
98 # check if search result starts with something like: "5 days ago ... "
99 elif re.match(r"^[0-9]+ days? ago \.\.\. ", content):
100 date_pos = content.find('...') + 4
101 date_string = content[0:date_pos - 5]
102
103 # calculate datetime
104 published_date = datetime.now() - timedelta(days=int(re.match(r'\d+', date_string).group()))
105
106 # fix content string
107 content = content[date_pos:]
108
109 if published_date:
110 # append result
111 results.append({'url': url,
112 'title': title,
113 'content': content,
114 'publishedDate': published_date})
115 else:
116 # append result
117 results.append({'url': url,
118 'title': title,
119 'content': content})
120
121 # return results
122 return results
123
[end of searx/engines/startpage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py
--- a/searx/engines/startpage.py
+++ b/searx/engines/startpage.py
@@ -32,8 +32,9 @@
# specific xpath variables
# ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
# not ads: div[@class="result"] are the direct childs of div[@id="results"]
-results_xpath = '//div[@class="result"]'
+results_xpath = '//li[contains(@class, "search-result") and contains(@class, "search-item")]'
link_xpath = './/h3/a'
+content_xpath = './p[@class="search-item__body"]'
# do search-request
@@ -79,8 +80,8 @@
title = extract_text(link)
- if result.xpath('./p[@class="desc clk"]'):
- content = extract_text(result.xpath('./p[@class="desc clk"]'))
+ if result.xpath(content_xpath):
+ content = extract_text(result.xpath(content_xpath))
else:
content = ''
|
{"golden_diff": "diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py\n--- a/searx/engines/startpage.py\n+++ b/searx/engines/startpage.py\n@@ -32,8 +32,9 @@\n # specific xpath variables\n # ads xpath //div[@id=\"results\"]/div[@id=\"sponsored\"]//div[@class=\"result\"]\n # not ads: div[@class=\"result\"] are the direct childs of div[@id=\"results\"]\n-results_xpath = '//div[@class=\"result\"]'\n+results_xpath = '//li[contains(@class, \"search-result\") and contains(@class, \"search-item\")]'\n link_xpath = './/h3/a'\n+content_xpath = './p[@class=\"search-item__body\"]'\n \n \n # do search-request\n@@ -79,8 +80,8 @@\n \n title = extract_text(link)\n \n- if result.xpath('./p[@class=\"desc clk\"]'):\n- content = extract_text(result.xpath('./p[@class=\"desc clk\"]'))\n+ if result.xpath(content_xpath):\n+ content = extract_text(result.xpath(content_xpath))\n else:\n content = ''\n", "issue": "Seems that startpage and ixquick enignes are not working anymore on Searx\nHello and thanks for the works.\r\n\r\nI tried on my searx and on other instance like searx.me... but It's not working.\r\n\r\nHow could we fix that?\r\n\r\nThanks.\r\n\r\n\n", "before_files": [{"content": "# Startpage (Web)\n#\n# @website https://startpage.com\n# @provide-api no (nothing found)\n#\n# @using-api no\n# @results HTML\n# @stable no (HTML can change)\n# @parse url, title, content\n#\n# @todo paging\n\nfrom lxml import html\nfrom dateutil import parser\nfrom datetime import datetime, timedelta\nimport re\nfrom searx.engines.xpath import extract_text\n\n# engine dependent config\ncategories = ['general']\n# there is a mechanism to block \"bot\" search\n# (probably the parameter qid), require\n# storing of qid's between mulitble search-calls\n\n# paging = False\nlanguage_support = True\n\n# search-url\nbase_url = 'https://startpage.com/'\nsearch_url = base_url + 'do/search'\n\n# specific xpath variables\n# ads xpath //div[@id=\"results\"]/div[@id=\"sponsored\"]//div[@class=\"result\"]\n# not ads: div[@class=\"result\"] are the direct childs of div[@id=\"results\"]\nresults_xpath = '//div[@class=\"result\"]'\nlink_xpath = './/h3/a'\n\n\n# do search-request\ndef request(query, params):\n offset = (params['pageno'] - 1) * 10\n\n params['url'] = search_url\n params['method'] = 'POST'\n params['data'] = {'query': query,\n 'startat': offset}\n\n # set language\n params['data']['with_language'] = ('lang_' + params['language'].split('-')[0])\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.text)\n\n # parse results\n for result in dom.xpath(results_xpath):\n links = result.xpath(link_xpath)\n if not links:\n continue\n link = links[0]\n url = link.attrib.get('href')\n\n # block google-ad url's\n if re.match(r\"^http(s|)://(www\\.)?google\\.[a-z]+/aclk.*$\", url):\n continue\n\n # block startpage search url's\n if re.match(r\"^http(s|)://(www\\.)?startpage\\.com/do/search\\?.*$\", url):\n continue\n\n # block ixquick search url's\n if re.match(r\"^http(s|)://(www\\.)?ixquick\\.com/do/search\\?.*$\", url):\n continue\n\n title = extract_text(link)\n\n if result.xpath('./p[@class=\"desc clk\"]'):\n content = extract_text(result.xpath('./p[@class=\"desc clk\"]'))\n else:\n content = ''\n\n published_date = None\n\n # check if search result starts with something like: \"2 Sep 2014 ... \"\n if re.match(r\"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \\.\\.\\. 
\", content):\n date_pos = content.find('...') + 4\n date_string = content[0:date_pos - 5]\n published_date = parser.parse(date_string, dayfirst=True)\n\n # fix content string\n content = content[date_pos:]\n\n # check if search result starts with something like: \"5 days ago ... \"\n elif re.match(r\"^[0-9]+ days? ago \\.\\.\\. \", content):\n date_pos = content.find('...') + 4\n date_string = content[0:date_pos - 5]\n\n # calculate datetime\n published_date = datetime.now() - timedelta(days=int(re.match(r'\\d+', date_string).group()))\n\n # fix content string\n content = content[date_pos:]\n\n if published_date:\n # append result\n results.append({'url': url,\n 'title': title,\n 'content': content,\n 'publishedDate': published_date})\n else:\n # append result\n results.append({'url': url,\n 'title': title,\n 'content': content})\n\n # return results\n return results\n", "path": "searx/engines/startpage.py"}]}
| 1,778 | 253 |
gh_patches_debug_19196
|
rasdani/github-patches
|
git_diff
|
pymedusa__Medusa-6867
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Free Mobile SMS notification failed
Hi,
I recently updated Medusa, and since then I haven't received any SMS for started/finished downloads.
On the error log file I have this:
```
2019-06-22 15:42:51 ERROR Thread_2 :: [74c3f12] Exception generated: can't concat str to bytes
Traceback (most recent call last):
File "C:\Medusa\Medusa\medusa\server\web\core\base.py", line 261, in async_call
result = function(**kwargs)
File "C:\Medusa\Medusa\medusa\server\web\home\handler.py", line 300, in testFreeMobile
result, message = notifiers.freemobile_notifier.test_notify(freemobile_id, freemobile_apikey)
File "C:\Medusa\Medusa\medusa\notifiers\freemobile.py", line 30, in test_notify
return self._notifyFreeMobile('Test', 'This is a test notification from Medusa', cust_id, apiKey, force=True)
File "C:\Medusa\Medusa\medusa\notifiers\freemobile.py", line 120, in _notifyFreeMobile
return self._sendFreeMobileSMS(title, message, cust_id, apiKey)
File "C:\Medusa\Medusa\medusa\notifiers\freemobile.py", line 51, in _sendFreeMobileSMS
msg_quoted = quote(title.encode('utf-8') + ': ' + msg.encode('utf-8'))
TypeError: can't concat str to bytes
```
My Free Mobile customer ID and my Free Mobile API Key are still the same.
Thanks in advance for any help you may provide.
Regards.
</issue>
<code>
[start of medusa/notifiers/freemobile.py]
1 # coding=utf-8
2
3 from __future__ import unicode_literals
4
5 import logging
6 from builtins import object
7
8 from medusa import app
9 from medusa.common import (
10 NOTIFY_DOWNLOAD,
11 NOTIFY_GIT_UPDATE,
12 NOTIFY_GIT_UPDATE_TEXT,
13 NOTIFY_LOGIN,
14 NOTIFY_LOGIN_TEXT,
15 NOTIFY_SUBTITLE_DOWNLOAD,
16 notifyStrings,
17 )
18 from medusa.logger.adapters.style import BraceAdapter
19
20 from requests.compat import quote
21
22 from six.moves.urllib.request import Request, urlopen
23
24 log = BraceAdapter(logging.getLogger(__name__))
25 log.logger.addHandler(logging.NullHandler())
26
27
28 class Notifier(object):
29 def test_notify(self, cust_id=None, apiKey=None):
30 return self._notifyFreeMobile('Test', 'This is a test notification from Medusa', cust_id, apiKey, force=True)
31
32 def _sendFreeMobileSMS(self, title, msg, cust_id=None, apiKey=None):
33 """
34 Send a SMS notification
35
36 msg: The message to send (unicode)
37 title: The title of the message
38 userKey: The pushover user id to send the message to (or to subscribe with)
39
40 return: True if the message succeeded, False otherwise
41 """
42 if cust_id is None:
43 cust_id = app.FREEMOBILE_ID
44 if apiKey is None:
45 apiKey = app.FREEMOBILE_APIKEY
46
47 log.debug(u'Free Mobile in use with API KEY: {0}', apiKey)
48
49 # build up the URL and parameters
50 msg = msg.strip()
51 msg_quoted = quote(title.encode('utf-8') + ': ' + msg.encode('utf-8'))
52 URL = 'https://smsapi.free-mobile.fr/sendmsg?user=' + cust_id + '&pass=' + apiKey + '&msg=' + msg_quoted
53
54 req = Request(URL)
55 # send the request to Free Mobile
56 try:
57 urlopen(req)
58 except IOError as e:
59 if hasattr(e, 'code'):
60 error_message = {
61 400: 'Missing parameter(s).',
62 402: 'Too much SMS sent in a short time.',
63 403: 'API service is not enabled in your account or ID / API key is incorrect.',
64 500: 'Server error. Please retry in few moment.',
65 }
66 message = error_message.get(e.code)
67 if message:
68 log.error(message)
69 return False, message
70 except Exception as e:
71 message = u'Error while sending SMS: {0}'.format(e)
72 log.error(message)
73 return False, message
74
75 message = 'Free Mobile SMS successful.'
76 log.info(message)
77 return True, message
78
79 def notify_snatch(self, title, message):
80 if app.FREEMOBILE_NOTIFY_ONSNATCH:
81 self._notifyFreeMobile(title, message)
82
83 def notify_download(self, ep_obj, title=notifyStrings[NOTIFY_DOWNLOAD]):
84 if app.FREEMOBILE_NOTIFY_ONDOWNLOAD:
85 self._notifyFreeMobile(title, ep_obj.pretty_name_with_quality())
86
87 def notify_subtitle_download(self, ep_obj, lang, title=notifyStrings[NOTIFY_SUBTITLE_DOWNLOAD]):
88 if app.FREEMOBILE_NOTIFY_ONSUBTITLEDOWNLOAD:
89 self._notifyFreeMobile(title, ep_obj.pretty_name() + ': ' + lang)
90
91 def notify_git_update(self, new_version='??'):
92 if app.USE_FREEMOBILE:
93 update_text = notifyStrings[NOTIFY_GIT_UPDATE_TEXT]
94 title = notifyStrings[NOTIFY_GIT_UPDATE]
95 self._notifyFreeMobile(title, update_text + new_version)
96
97 def notify_login(self, ipaddress=''):
98 if app.USE_FREEMOBILE:
99 update_text = notifyStrings[NOTIFY_LOGIN_TEXT]
100 title = notifyStrings[NOTIFY_LOGIN]
101 self._notifyFreeMobile(title, update_text.format(ipaddress))
102
103 def _notifyFreeMobile(self, title, message, cust_id=None, apiKey=None, force=False): # pylint: disable=too-many-arguments
104 """
105 Sends a SMS notification
106
107 title: The title of the notification to send
108 message: The message string to send
109 cust_id: Your Free Mobile customer ID
110 apikey: Your Free Mobile API key
111 force: Enforce sending, for instance for testing
112 """
113
114 if not app.USE_FREEMOBILE and not force:
115 log.debug(u'Notification for Free Mobile not enabled, skipping this notification')
116 return False, 'Disabled'
117
118 log.debug(u'Sending a SMS for {0}', message)
119
120 return self._sendFreeMobileSMS(title, message, cust_id, apiKey)
121
[end of medusa/notifiers/freemobile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/medusa/notifiers/freemobile.py b/medusa/notifiers/freemobile.py
--- a/medusa/notifiers/freemobile.py
+++ b/medusa/notifiers/freemobile.py
@@ -3,7 +3,6 @@
from __future__ import unicode_literals
import logging
-from builtins import object
from medusa import app
from medusa.common import (
@@ -47,9 +46,13 @@
log.debug(u'Free Mobile in use with API KEY: {0}', apiKey)
# build up the URL and parameters
- msg = msg.strip()
- msg_quoted = quote(title.encode('utf-8') + ': ' + msg.encode('utf-8'))
- URL = 'https://smsapi.free-mobile.fr/sendmsg?user=' + cust_id + '&pass=' + apiKey + '&msg=' + msg_quoted
+ msg = '{0}: {1}'.format(title, msg.strip())
+ msg_quoted = quote(msg.encode('utf-8'))
+ URL = 'https://smsapi.free-mobile.fr/sendmsg?user={user}&pass={api_key}&msg={msg}'.format(
+ user=cust_id,
+ api_key=apiKey,
+ msg=msg_quoted,
+ )
req = Request(URL)
# send the request to Free Mobile
|
{"golden_diff": "diff --git a/medusa/notifiers/freemobile.py b/medusa/notifiers/freemobile.py\n--- a/medusa/notifiers/freemobile.py\n+++ b/medusa/notifiers/freemobile.py\n@@ -3,7 +3,6 @@\n from __future__ import unicode_literals\n \n import logging\n-from builtins import object\n \n from medusa import app\n from medusa.common import (\n@@ -47,9 +46,13 @@\n log.debug(u'Free Mobile in use with API KEY: {0}', apiKey)\n \n # build up the URL and parameters\n- msg = msg.strip()\n- msg_quoted = quote(title.encode('utf-8') + ': ' + msg.encode('utf-8'))\n- URL = 'https://smsapi.free-mobile.fr/sendmsg?user=' + cust_id + '&pass=' + apiKey + '&msg=' + msg_quoted\n+ msg = '{0}: {1}'.format(title, msg.strip())\n+ msg_quoted = quote(msg.encode('utf-8'))\n+ URL = 'https://smsapi.free-mobile.fr/sendmsg?user={user}&pass={api_key}&msg={msg}'.format(\n+ user=cust_id,\n+ api_key=apiKey,\n+ msg=msg_quoted,\n+ )\n \n req = Request(URL)\n # send the request to Free Mobile\n", "issue": "Free Mobile SMS notification failed\nHi, \r\nI recently update Medusa, and since then, I didn't receive any SMS for started/finished downloads\r\n\r\nOn the error log file I have this:\r\n```\r\n2019-06-22 15:42:51 ERROR Thread_2 :: [74c3f12] Exception generated: can't concat str to bytes\r\nTraceback (most recent call last):\r\n File \"C:\\Medusa\\Medusa\\medusa\\server\\web\\core\\base.py\", line 261, in async_call\r\n result = function(**kwargs)\r\n File \"C:\\Medusa\\Medusa\\medusa\\server\\web\\home\\handler.py\", line 300, in testFreeMobile\r\n result, message = notifiers.freemobile_notifier.test_notify(freemobile_id, freemobile_apikey)\r\n File \"C:\\Medusa\\Medusa\\medusa\\notifiers\\freemobile.py\", line 30, in test_notify\r\n return self._notifyFreeMobile('Test', 'This is a test notification from Medusa', cust_id, apiKey, force=True)\r\n File \"C:\\Medusa\\Medusa\\medusa\\notifiers\\freemobile.py\", line 120, in _notifyFreeMobile\r\n return self._sendFreeMobileSMS(title, message, cust_id, apiKey)\r\n File \"C:\\Medusa\\Medusa\\medusa\\notifiers\\freemobile.py\", line 51, in _sendFreeMobileSMS\r\n msg_quoted = quote(title.encode('utf-8') + ': ' + msg.encode('utf-8'))\r\nTypeError: can't concat str to bytes\r\n```\r\nMy Free Mobile customer ID and my Free Mobile API Key are still the same..\r\n\r\nThanks in advance for any help you may provide.\r\nRegards.\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport logging\nfrom builtins import object\n\nfrom medusa import app\nfrom medusa.common import (\n NOTIFY_DOWNLOAD,\n NOTIFY_GIT_UPDATE,\n NOTIFY_GIT_UPDATE_TEXT,\n NOTIFY_LOGIN,\n NOTIFY_LOGIN_TEXT,\n NOTIFY_SUBTITLE_DOWNLOAD,\n notifyStrings,\n)\nfrom medusa.logger.adapters.style import BraceAdapter\n\nfrom requests.compat import quote\n\nfrom six.moves.urllib.request import Request, urlopen\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass Notifier(object):\n def test_notify(self, cust_id=None, apiKey=None):\n return self._notifyFreeMobile('Test', 'This is a test notification from Medusa', cust_id, apiKey, force=True)\n\n def _sendFreeMobileSMS(self, title, msg, cust_id=None, apiKey=None):\n \"\"\"\n Send a SMS notification\n\n msg: The message to send (unicode)\n title: The title of the message\n userKey: The pushover user id to send the message to (or to subscribe with)\n\n return: True if the message succeeded, False otherwise\n \"\"\"\n if cust_id is None:\n cust_id = app.FREEMOBILE_ID\n if apiKey is None:\n 
apiKey = app.FREEMOBILE_APIKEY\n\n log.debug(u'Free Mobile in use with API KEY: {0}', apiKey)\n\n # build up the URL and parameters\n msg = msg.strip()\n msg_quoted = quote(title.encode('utf-8') + ': ' + msg.encode('utf-8'))\n URL = 'https://smsapi.free-mobile.fr/sendmsg?user=' + cust_id + '&pass=' + apiKey + '&msg=' + msg_quoted\n\n req = Request(URL)\n # send the request to Free Mobile\n try:\n urlopen(req)\n except IOError as e:\n if hasattr(e, 'code'):\n error_message = {\n 400: 'Missing parameter(s).',\n 402: 'Too much SMS sent in a short time.',\n 403: 'API service is not enabled in your account or ID / API key is incorrect.',\n 500: 'Server error. Please retry in few moment.',\n }\n message = error_message.get(e.code)\n if message:\n log.error(message)\n return False, message\n except Exception as e:\n message = u'Error while sending SMS: {0}'.format(e)\n log.error(message)\n return False, message\n\n message = 'Free Mobile SMS successful.'\n log.info(message)\n return True, message\n\n def notify_snatch(self, title, message):\n if app.FREEMOBILE_NOTIFY_ONSNATCH:\n self._notifyFreeMobile(title, message)\n\n def notify_download(self, ep_obj, title=notifyStrings[NOTIFY_DOWNLOAD]):\n if app.FREEMOBILE_NOTIFY_ONDOWNLOAD:\n self._notifyFreeMobile(title, ep_obj.pretty_name_with_quality())\n\n def notify_subtitle_download(self, ep_obj, lang, title=notifyStrings[NOTIFY_SUBTITLE_DOWNLOAD]):\n if app.FREEMOBILE_NOTIFY_ONSUBTITLEDOWNLOAD:\n self._notifyFreeMobile(title, ep_obj.pretty_name() + ': ' + lang)\n\n def notify_git_update(self, new_version='??'):\n if app.USE_FREEMOBILE:\n update_text = notifyStrings[NOTIFY_GIT_UPDATE_TEXT]\n title = notifyStrings[NOTIFY_GIT_UPDATE]\n self._notifyFreeMobile(title, update_text + new_version)\n\n def notify_login(self, ipaddress=''):\n if app.USE_FREEMOBILE:\n update_text = notifyStrings[NOTIFY_LOGIN_TEXT]\n title = notifyStrings[NOTIFY_LOGIN]\n self._notifyFreeMobile(title, update_text.format(ipaddress))\n\n def _notifyFreeMobile(self, title, message, cust_id=None, apiKey=None, force=False): # pylint: disable=too-many-arguments\n \"\"\"\n Sends a SMS notification\n\n title: The title of the notification to send\n message: The message string to send\n cust_id: Your Free Mobile customer ID\n apikey: Your Free Mobile API key\n force: Enforce sending, for instance for testing\n \"\"\"\n\n if not app.USE_FREEMOBILE and not force:\n log.debug(u'Notification for Free Mobile not enabled, skipping this notification')\n return False, 'Disabled'\n\n log.debug(u'Sending a SMS for {0}', message)\n\n return self._sendFreeMobileSMS(title, message, cust_id, apiKey)\n", "path": "medusa/notifiers/freemobile.py"}]}
| 2,197 | 299 |
gh_patches_debug_22172
|
rasdani/github-patches
|
git_diff
|
pretalx__pretalx-77
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
deleting a room, which is in use yields ProtectedError
When you try to delete a room that has talks assigned to it, the app displays a ProtectedError. This can probably be handled the same way as [deleting a submission type](https://github.com/openeventstack/pretalx/blob/master/src/pretalx/orga/views/cfp.py#L147-L152).
</issue>
<code>
[start of src/pretalx/orga/views/settings.py]
1 import string
2
3 from django.contrib import messages
4 from django.contrib.auth import login
5 from django.core.exceptions import PermissionDenied
6 from django.shortcuts import redirect
7 from django.urls import reverse
8 from django.utils.crypto import get_random_string
9 from django.utils.functional import cached_property
10 from django.utils.translation import ugettext_lazy as _
11 from django.views.generic import FormView, TemplateView, View
12
13 from pretalx.common.mail import mail_send_task
14 from pretalx.common.urls import build_absolute_uri
15 from pretalx.common.views import ActionFromUrl, CreateOrUpdateView
16 from pretalx.event.models import Event
17 from pretalx.orga.forms import EventForm
18 from pretalx.orga.forms.event import MailSettingsForm
19 from pretalx.person.forms import LoginInfoForm, UserForm, OrgaProfileForm
20 from pretalx.person.models import EventPermission, User
21 from pretalx.schedule.forms import RoomForm
22 from pretalx.schedule.models import Room
23
24
25 class EventDetail(ActionFromUrl, CreateOrUpdateView):
26 model = Event
27 form_class = EventForm
28 template_name = 'orga/settings/form.html'
29
30 def dispatch(self, request, *args, **kwargs):
31 if self._action == 'create':
32 if not request.user.is_anonymous and not request.user.is_superuser:
33 raise PermissionDenied()
34 return super().dispatch(request, *args, **kwargs)
35
36 def get_object(self):
37 try:
38 return self.request.event
39 except AttributeError:
40 return
41
42 def get_success_url(self) -> str:
43 return self.object.orga_urls.settings
44
45 def form_valid(self, form):
46 new_event = not bool(form.instance.pk)
47 ret = super().form_valid(form)
48 if new_event:
49 messages.success(self.request, _('Yay, a new event! Check the settings and configure a CfP and you\'re good to go!'))
50 form.instance.log_action('pretalx.event.create', person=self.request.user, orga=True)
51 EventPermission.objects.create(
52 event=form.instance,
53 user=self.request.user,
54 is_orga=True,
55 )
56 else:
57 form.instance.log_action('pretalx.event.update', person=self.request.user, orga=True)
58 messages.success(self.request, _('The event settings have been saved.'))
59 return ret
60
61
62 class EventMailSettings(ActionFromUrl, FormView):
63 form_class = MailSettingsForm
64 template_name = 'orga/settings/mail.html'
65
66 def get_success_url(self) -> str:
67 return self.request.event.orga_urls.mail_settings
68
69 def get_form_kwargs(self):
70 kwargs = super().get_form_kwargs()
71 kwargs['obj'] = self.request.event
72 kwargs['attribute_name'] = 'settings'
73 kwargs['locales'] = self.request.event.locales
74 return kwargs
75
76 def form_valid(self, form):
77 form.save()
78
79 if self.request.POST.get('test', '0').strip() == '1':
80 backend = self.request.event.get_mail_backend(force_custom=True)
81 try:
82 backend.test(self.request.event.settings.mail_from)
83 except Exception as e:
84 messages.warning(self.request, _('An error occured while contacting the SMTP server: %s') % str(e))
85 return redirect(reverse('orga:settings.mail.edit', kwargs={'event': self.request.event.slug}))
86 else:
87 if form.cleaned_data.get('smtp_use_custom'):
88 messages.success(self.request, _('Yay, your changes have been saved and the connection attempt to '
89 'your SMTP server was successful.'))
90 else:
91 messages.success(self.request, _('We\'ve been able to contact the SMTP server you configured. '
92 'Remember to check the "use custom SMTP server" checkbox, '
93 'otherwise your SMTP server will not be used.'))
94 else:
95 messages.success(self.request, _('Yay! We saved your changes.'))
96
97 ret = super().form_valid(form)
98 return ret
99
100
101 class EventTeam(TemplateView):
102 template_name = 'orga/settings/team.html'
103
104 def get_context_data(self, *args, **kwargs):
105 ctx = super().get_context_data(*args, **kwargs)
106 event = self.request.event
107 ctx['team'] = User.objects.filter(
108 permissions__is_orga=True,
109 permissions__event=event,
110 )
111 ctx['pending'] = EventPermission.objects.filter(event=event, user__isnull=True, is_orga=True)
112 return ctx
113
114
115 class EventTeamInvite(View):
116
117 def post(self, request, event):
118 email = request.POST.get('email')
119 event = request.event
120 invitation_token = get_random_string(allowed_chars=string.ascii_lowercase + string.digits, length=20)
121 invitation_link = build_absolute_uri('orga:invitation.view', kwargs={'code': invitation_token})
122 EventPermission.objects.create(
123 event=event,
124 invitation_email=email,
125 invitation_token=invitation_token,
126 is_orga=True,
127 )
128 invitation_text = _('''Hi!
129
130 You have been invited to the orga crew of {event} - Please click here to accept:
131
132 {invitation_link}
133
134 See you there,
135 The {event} orga crew (minus you)''').format(event=event.name, invitation_link=invitation_link)
136 mail_send_task.apply_async(args=(
137 [email],
138 _('You have been invited to the orga crew of {event}').format(event=request.event.name),
139 invitation_text,
140 request.event.email,
141 event.pk
142 ))
143 request.event.log_action('pretalx.event.invite.orga.send', person=request.user, orga=True)
144 messages.success(
145 request,
146 _('<{email}> has been invited to your team - more team members help distribute the workload, so … yay!').format(email=email)
147 )
148 return redirect(request.event.orga_urls.team_settings)
149
150
151 class EventTeamRetract(View):
152
153 def dispatch(self, request, event, pk):
154 EventPermission.objects.filter(event=request.event, pk=pk).delete()
155 request.event.log_action('pretalx.event.invite.orga.retract', person=request.user, orga=True)
156 return redirect(request.event.orga_urls.team_settings)
157
158
159 class EventTeamDelete(View):
160
161 def dispatch(self, request, event, pk):
162 EventPermission.objects.filter(event=request.event, user__id=pk).update(is_orga=False)
163 return redirect(request.event.orga_urls.team_settings)
164
165
166 class InvitationView(FormView):
167 template_name = 'orga/invitation.html'
168 form_class = UserForm
169
170 def get_context_data(self, *args, **kwargs):
171 ctx = super().get_context_data(*args, **kwargs)
172 ctx['invitation'] = EventPermission.objects.get(
173 invitation_token=self.kwargs.get('code'),
174 )
175 return ctx
176
177 def form_valid(self, form):
178 form.save()
179 permission = EventPermission.objects.get(invitation_token=self.kwargs.get('code'))
180 user = User.objects.get(pk=form.cleaned_data.get('user_id'))
181
182 permission.is_orga = True
183 permission.user = user
184 permission.save()
185 permission.event.log_action('pretalx.event.invite.orga.accept', person=user, orga=True)
186 login(self.request, user)
187 return redirect(permission.event.orga_urls.base)
188
189
190 class UserSettings(TemplateView):
191 form_class = LoginInfoForm
192 template_name = 'orga/user.html'
193
194 def get_success_url(self) -> str:
195 return reverse('orga:user.view')
196
197 @cached_property
198 def login_form(self):
199 return LoginInfoForm(user=self.request.user,
200 data=(self.request.POST
201 if self.request.method == 'POST' and self.request.POST.get('form') == 'login'
202 else None))
203
204 @cached_property
205 def profile_form(self):
206 return OrgaProfileForm(instance=self.request.user,
207 data=(self.request.POST
208 if self.request.method == 'POST' and self.request.POST.get('form') == 'profile'
209 else None))
210
211 def get_context_data(self, **kwargs):
212 ctx = super().get_context_data(**kwargs)
213 ctx['login_form'] = self.login_form
214 ctx['profile_form'] = self.profile_form
215 return ctx
216
217 def post(self, request, *args, **kwargs):
218 if self.login_form.is_bound:
219 if self.login_form.is_valid():
220 self.login_form.save()
221 messages.success(request, _('Your changes have been saved.'))
222 request.user.log_action('pretalx.user.password.update')
223 return redirect(self.get_success_url())
224 elif self.profile_form.is_bound:
225 if self.profile_form.is_valid():
226 self.profile_form.save()
227 messages.success(request, _('Your changes have been saved.'))
228 request.user.log_action('pretalx.user.profile.update')
229 return redirect(self.get_success_url())
230
231 messages.error(self.request, _('Oh :( We had trouble saving your input. See below for details.'))
232 return super().get(request, *args, **kwargs)
233
234
235 class RoomList(TemplateView):
236 template_name = 'orga/settings/room_list.html'
237
238
239 class RoomDelete(View):
240
241 def dispatch(self, request, event, pk):
242 request.event.rooms.get(pk=pk).delete()
243 messages.success(self.request, _('Room deleted. Hopefully nobody was still in there …'))
244 return redirect(request.event.orga_urls.room_settings)
245
246
247 class RoomDetail(ActionFromUrl, CreateOrUpdateView):
248 model = Room
249 form_class = RoomForm
250 template_name = 'orga/settings/room_form.html'
251
252 def get_object(self):
253 try:
254 return self.request.event.rooms.get(pk=self.kwargs['pk'])
255 except (Room.DoesNotExist, KeyError):
256 return
257
258 def get_success_url(self) -> str:
259 return self.request.event.orga_urls.room_settings
260
261 def form_valid(self, form):
262 created = not bool(form.instance.pk)
263 form.instance.event = self.request.event
264 ret = super().form_valid(form)
265 messages.success(self.request, _('Saved!'))
266 if created:
267 form.instance.log_action('pretalx.room.create', person=self.request.user, orga=True)
268 else:
269 form.instance.log_action('pretalx.event.update', person=self.request.user, orga=True)
270 return ret
271
[end of src/pretalx/orga/views/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/pretalx/orga/views/settings.py b/src/pretalx/orga/views/settings.py
--- a/src/pretalx/orga/views/settings.py
+++ b/src/pretalx/orga/views/settings.py
@@ -3,6 +3,7 @@
from django.contrib import messages
from django.contrib.auth import login
from django.core.exceptions import PermissionDenied
+from django.db.models.deletion import ProtectedError
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.crypto import get_random_string
@@ -239,8 +240,12 @@
class RoomDelete(View):
def dispatch(self, request, event, pk):
- request.event.rooms.get(pk=pk).delete()
- messages.success(self.request, _('Room deleted. Hopefully nobody was still in there …'))
+ try:
+ request.event.rooms.get(pk=pk).delete()
+ messages.success(self.request, _('Room deleted. Hopefully nobody was still in there …'))
+ except ProtectedError: # TODO: show which/how many talks are concerned
+ messages.error(request, _('There is or was a talk scheduled in this room. It cannot be deleted.'))
+
return redirect(request.event.orga_urls.room_settings)
|
{"golden_diff": "diff --git a/src/pretalx/orga/views/settings.py b/src/pretalx/orga/views/settings.py\n--- a/src/pretalx/orga/views/settings.py\n+++ b/src/pretalx/orga/views/settings.py\n@@ -3,6 +3,7 @@\n from django.contrib import messages\n from django.contrib.auth import login\n from django.core.exceptions import PermissionDenied\n+from django.db.models.deletion import ProtectedError\n from django.shortcuts import redirect\n from django.urls import reverse\n from django.utils.crypto import get_random_string\n@@ -239,8 +240,12 @@\n class RoomDelete(View):\n \n def dispatch(self, request, event, pk):\n- request.event.rooms.get(pk=pk).delete()\n- messages.success(self.request, _('Room deleted. Hopefully nobody was still in there \u2026'))\n+ try:\n+ request.event.rooms.get(pk=pk).delete()\n+ messages.success(self.request, _('Room deleted. Hopefully nobody was still in there \u2026'))\n+ except ProtectedError: # TODO: show which/how many talks are concerned\n+ messages.error(request, _('There is or was a talk scheduled in this room. It cannot be deleted.'))\n+\n return redirect(request.event.orga_urls.room_settings)\n", "issue": "deleting a room, which is in use yields ProtectedError\nWhen you try to delete a room, which has talks assigned to it the app displays an ProtectedError. This can probably be handled it [deleting a submission type](https://github.com/openeventstack/pretalx/blob/master/src/pretalx/orga/views/cfp.py#L147-L152).\n", "before_files": [{"content": "import string\n\nfrom django.contrib import messages\nfrom django.contrib.auth import login\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.utils.crypto import get_random_string\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import FormView, TemplateView, View\n\nfrom pretalx.common.mail import mail_send_task\nfrom pretalx.common.urls import build_absolute_uri\nfrom pretalx.common.views import ActionFromUrl, CreateOrUpdateView\nfrom pretalx.event.models import Event\nfrom pretalx.orga.forms import EventForm\nfrom pretalx.orga.forms.event import MailSettingsForm\nfrom pretalx.person.forms import LoginInfoForm, UserForm, OrgaProfileForm\nfrom pretalx.person.models import EventPermission, User\nfrom pretalx.schedule.forms import RoomForm\nfrom pretalx.schedule.models import Room\n\n\nclass EventDetail(ActionFromUrl, CreateOrUpdateView):\n model = Event\n form_class = EventForm\n template_name = 'orga/settings/form.html'\n\n def dispatch(self, request, *args, **kwargs):\n if self._action == 'create':\n if not request.user.is_anonymous and not request.user.is_superuser:\n raise PermissionDenied()\n return super().dispatch(request, *args, **kwargs)\n\n def get_object(self):\n try:\n return self.request.event\n except AttributeError:\n return\n\n def get_success_url(self) -> str:\n return self.object.orga_urls.settings\n\n def form_valid(self, form):\n new_event = not bool(form.instance.pk)\n ret = super().form_valid(form)\n if new_event:\n messages.success(self.request, _('Yay, a new event! 
Check the settings and configure a CfP and you\\'re good to go!'))\n form.instance.log_action('pretalx.event.create', person=self.request.user, orga=True)\n EventPermission.objects.create(\n event=form.instance,\n user=self.request.user,\n is_orga=True,\n )\n else:\n form.instance.log_action('pretalx.event.update', person=self.request.user, orga=True)\n messages.success(self.request, _('The event settings have been saved.'))\n return ret\n\n\nclass EventMailSettings(ActionFromUrl, FormView):\n form_class = MailSettingsForm\n template_name = 'orga/settings/mail.html'\n\n def get_success_url(self) -> str:\n return self.request.event.orga_urls.mail_settings\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['obj'] = self.request.event\n kwargs['attribute_name'] = 'settings'\n kwargs['locales'] = self.request.event.locales\n return kwargs\n\n def form_valid(self, form):\n form.save()\n\n if self.request.POST.get('test', '0').strip() == '1':\n backend = self.request.event.get_mail_backend(force_custom=True)\n try:\n backend.test(self.request.event.settings.mail_from)\n except Exception as e:\n messages.warning(self.request, _('An error occured while contacting the SMTP server: %s') % str(e))\n return redirect(reverse('orga:settings.mail.edit', kwargs={'event': self.request.event.slug}))\n else:\n if form.cleaned_data.get('smtp_use_custom'):\n messages.success(self.request, _('Yay, your changes have been saved and the connection attempt to '\n 'your SMTP server was successful.'))\n else:\n messages.success(self.request, _('We\\'ve been able to contact the SMTP server you configured. '\n 'Remember to check the \"use custom SMTP server\" checkbox, '\n 'otherwise your SMTP server will not be used.'))\n else:\n messages.success(self.request, _('Yay! 
We saved your changes.'))\n\n ret = super().form_valid(form)\n return ret\n\n\nclass EventTeam(TemplateView):\n template_name = 'orga/settings/team.html'\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n event = self.request.event\n ctx['team'] = User.objects.filter(\n permissions__is_orga=True,\n permissions__event=event,\n )\n ctx['pending'] = EventPermission.objects.filter(event=event, user__isnull=True, is_orga=True)\n return ctx\n\n\nclass EventTeamInvite(View):\n\n def post(self, request, event):\n email = request.POST.get('email')\n event = request.event\n invitation_token = get_random_string(allowed_chars=string.ascii_lowercase + string.digits, length=20)\n invitation_link = build_absolute_uri('orga:invitation.view', kwargs={'code': invitation_token})\n EventPermission.objects.create(\n event=event,\n invitation_email=email,\n invitation_token=invitation_token,\n is_orga=True,\n )\n invitation_text = _('''Hi!\n\nYou have been invited to the orga crew of {event} - Please click here to accept:\n\n {invitation_link}\n\nSee you there,\nThe {event} orga crew (minus you)''').format(event=event.name, invitation_link=invitation_link)\n mail_send_task.apply_async(args=(\n [email],\n _('You have been invited to the orga crew of {event}').format(event=request.event.name),\n invitation_text,\n request.event.email,\n event.pk\n ))\n request.event.log_action('pretalx.event.invite.orga.send', person=request.user, orga=True)\n messages.success(\n request,\n _('<{email}> has been invited to your team - more team members help distribute the workload, so \u2026 yay!').format(email=email)\n )\n return redirect(request.event.orga_urls.team_settings)\n\n\nclass EventTeamRetract(View):\n\n def dispatch(self, request, event, pk):\n EventPermission.objects.filter(event=request.event, pk=pk).delete()\n request.event.log_action('pretalx.event.invite.orga.retract', person=request.user, orga=True)\n return redirect(request.event.orga_urls.team_settings)\n\n\nclass EventTeamDelete(View):\n\n def dispatch(self, request, event, pk):\n EventPermission.objects.filter(event=request.event, user__id=pk).update(is_orga=False)\n return redirect(request.event.orga_urls.team_settings)\n\n\nclass InvitationView(FormView):\n template_name = 'orga/invitation.html'\n form_class = UserForm\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['invitation'] = EventPermission.objects.get(\n invitation_token=self.kwargs.get('code'),\n )\n return ctx\n\n def form_valid(self, form):\n form.save()\n permission = EventPermission.objects.get(invitation_token=self.kwargs.get('code'))\n user = User.objects.get(pk=form.cleaned_data.get('user_id'))\n\n permission.is_orga = True\n permission.user = user\n permission.save()\n permission.event.log_action('pretalx.event.invite.orga.accept', person=user, orga=True)\n login(self.request, user)\n return redirect(permission.event.orga_urls.base)\n\n\nclass UserSettings(TemplateView):\n form_class = LoginInfoForm\n template_name = 'orga/user.html'\n\n def get_success_url(self) -> str:\n return reverse('orga:user.view')\n\n @cached_property\n def login_form(self):\n return LoginInfoForm(user=self.request.user,\n data=(self.request.POST\n if self.request.method == 'POST' and self.request.POST.get('form') == 'login'\n else None))\n\n @cached_property\n def profile_form(self):\n return OrgaProfileForm(instance=self.request.user,\n data=(self.request.POST\n if self.request.method == 'POST' and 
self.request.POST.get('form') == 'profile'\n else None))\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['login_form'] = self.login_form\n ctx['profile_form'] = self.profile_form\n return ctx\n\n def post(self, request, *args, **kwargs):\n if self.login_form.is_bound:\n if self.login_form.is_valid():\n self.login_form.save()\n messages.success(request, _('Your changes have been saved.'))\n request.user.log_action('pretalx.user.password.update')\n return redirect(self.get_success_url())\n elif self.profile_form.is_bound:\n if self.profile_form.is_valid():\n self.profile_form.save()\n messages.success(request, _('Your changes have been saved.'))\n request.user.log_action('pretalx.user.profile.update')\n return redirect(self.get_success_url())\n\n messages.error(self.request, _('Oh :( We had trouble saving your input. See below for details.'))\n return super().get(request, *args, **kwargs)\n\n\nclass RoomList(TemplateView):\n template_name = 'orga/settings/room_list.html'\n\n\nclass RoomDelete(View):\n\n def dispatch(self, request, event, pk):\n request.event.rooms.get(pk=pk).delete()\n messages.success(self.request, _('Room deleted. Hopefully nobody was still in there \u2026'))\n return redirect(request.event.orga_urls.room_settings)\n\n\nclass RoomDetail(ActionFromUrl, CreateOrUpdateView):\n model = Room\n form_class = RoomForm\n template_name = 'orga/settings/room_form.html'\n\n def get_object(self):\n try:\n return self.request.event.rooms.get(pk=self.kwargs['pk'])\n except (Room.DoesNotExist, KeyError):\n return\n\n def get_success_url(self) -> str:\n return self.request.event.orga_urls.room_settings\n\n def form_valid(self, form):\n created = not bool(form.instance.pk)\n form.instance.event = self.request.event\n ret = super().form_valid(form)\n messages.success(self.request, _('Saved!'))\n if created:\n form.instance.log_action('pretalx.room.create', person=self.request.user, orga=True)\n else:\n form.instance.log_action('pretalx.event.update', person=self.request.user, orga=True)\n return ret\n", "path": "src/pretalx/orga/views/settings.py"}]}
| 3,510 | 265 |
gh_patches_debug_54239
|
rasdani/github-patches
|
git_diff
|
celery__celery-6866
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Celery inspect output JSON serialization is broken
# Checklist
- [ ] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [ ] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [ ] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [ ] I have verified that the issue exists against the `master` branch of Celery.
- [ ] I have included the contents of ``pip freeze`` in the issue.
- [ ] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [ ] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [ ] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Environment & Settings
**Celery version**: 5.1.2 (sun-harmonics)
# Steps to Reproduce
Execute `celery -A proj inspect -j active`
# Expected Behavior
Valid JSON string is returned, like:
```
{"worker-1": [], "worker-2": [], "worker-3": []}
```
# Actual Behavior
The command returns valid JSON on the first line, followed by a double newline and a non-JSON summary:
```
{"worker-1": [], "worker-2": [], "worker-3": []}
3 nodes online.
```
which makes the overall output an invalid JSON string.
</issue>
<code>
[start of celery/bin/control.py]
1 """The ``celery control``, ``. inspect`` and ``. status`` programs."""
2 from functools import partial
3
4 import click
5 from kombu.utils.json import dumps
6
7 from celery.bin.base import (COMMA_SEPARATED_LIST, CeleryCommand,
8 CeleryOption, handle_preload_options)
9 from celery.exceptions import CeleryCommandException
10 from celery.platforms import EX_UNAVAILABLE
11 from celery.utils import text
12 from celery.worker.control import Panel
13
14
15 def _say_remote_command_reply(ctx, replies, show_reply=False):
16 node = next(iter(replies)) # <-- take first.
17 reply = replies[node]
18 node = ctx.obj.style(f'{node}: ', fg='cyan', bold=True)
19 status, preply = ctx.obj.pretty(reply)
20 ctx.obj.say_chat('->', f'{node}{status}',
21 text.indent(preply, 4) if show_reply else '',
22 show_body=show_reply)
23
24
25 def _consume_arguments(meta, method, args):
26 i = 0
27 try:
28 for i, arg in enumerate(args):
29 try:
30 name, typ = meta.args[i]
31 except IndexError:
32 if meta.variadic:
33 break
34 raise click.UsageError(
35 'Command {!r} takes arguments: {}'.format(
36 method, meta.signature))
37 else:
38 yield name, typ(arg) if typ is not None else arg
39 finally:
40 args[:] = args[i:]
41
42
43 def _compile_arguments(action, args):
44 meta = Panel.meta[action]
45 arguments = {}
46 if meta.args:
47 arguments.update({
48 k: v for k, v in _consume_arguments(meta, action, args)
49 })
50 if meta.variadic:
51 arguments.update({meta.variadic: args})
52 return arguments
53
54
55 @click.command(cls=CeleryCommand)
56 @click.option('-t',
57 '--timeout',
58 cls=CeleryOption,
59 type=float,
60 default=1.0,
61 help_group='Remote Control Options',
62 help='Timeout in seconds waiting for reply.')
63 @click.option('-d',
64 '--destination',
65 cls=CeleryOption,
66 type=COMMA_SEPARATED_LIST,
67 help_group='Remote Control Options',
68 help='Comma separated list of destination node names.')
69 @click.option('-j',
70 '--json',
71 cls=CeleryOption,
72 is_flag=True,
73 help_group='Remote Control Options',
74 help='Use json as output format.')
75 @click.pass_context
76 @handle_preload_options
77 def status(ctx, timeout, destination, json, **kwargs):
78 """Show list of workers that are online."""
79 callback = None if json else partial(_say_remote_command_reply, ctx)
80 replies = ctx.obj.app.control.inspect(timeout=timeout,
81 destination=destination,
82 callback=callback).ping()
83
84 if not replies:
85 raise CeleryCommandException(
86 message='No nodes replied within time constraint',
87 exit_code=EX_UNAVAILABLE
88 )
89
90 if json:
91 ctx.obj.echo(dumps(replies))
92 nodecount = len(replies)
93 if not kwargs.get('quiet', False):
94 ctx.obj.echo('\n{} {} online.'.format(
95 nodecount, text.pluralize(nodecount, 'node')))
96
97
98 @click.command(cls=CeleryCommand,
99 context_settings={'allow_extra_args': True})
100 @click.argument("action", type=click.Choice([
101 name for name, info in Panel.meta.items()
102 if info.type == 'inspect' and info.visible
103 ]))
104 @click.option('-t',
105 '--timeout',
106 cls=CeleryOption,
107 type=float,
108 default=1.0,
109 help_group='Remote Control Options',
110 help='Timeout in seconds waiting for reply.')
111 @click.option('-d',
112 '--destination',
113 cls=CeleryOption,
114 type=COMMA_SEPARATED_LIST,
115 help_group='Remote Control Options',
116 help='Comma separated list of destination node names.')
117 @click.option('-j',
118 '--json',
119 cls=CeleryOption,
120 is_flag=True,
121 help_group='Remote Control Options',
122 help='Use json as output format.')
123 @click.pass_context
124 @handle_preload_options
125 def inspect(ctx, action, timeout, destination, json, **kwargs):
126 """Inspect the worker at runtime.
127
128 Availability: RabbitMQ (AMQP) and Redis transports.
129 """
130 callback = None if json else partial(_say_remote_command_reply, ctx,
131 show_reply=True)
132 arguments = _compile_arguments(action, ctx.args)
133 inspect = ctx.obj.app.control.inspect(timeout=timeout,
134 destination=destination,
135 callback=callback)
136 replies = inspect._request(action,
137 **arguments)
138
139 if not replies:
140 raise CeleryCommandException(
141 message='No nodes replied within time constraint',
142 exit_code=EX_UNAVAILABLE
143 )
144
145 if json:
146 ctx.obj.echo(dumps(replies))
147 nodecount = len(replies)
148 if not ctx.obj.quiet:
149 ctx.obj.echo('\n{} {} online.'.format(
150 nodecount, text.pluralize(nodecount, 'node')))
151
152
153 @click.command(cls=CeleryCommand,
154 context_settings={'allow_extra_args': True})
155 @click.argument("action", type=click.Choice([
156 name for name, info in Panel.meta.items()
157 if info.type == 'control' and info.visible
158 ]))
159 @click.option('-t',
160 '--timeout',
161 cls=CeleryOption,
162 type=float,
163 default=1.0,
164 help_group='Remote Control Options',
165 help='Timeout in seconds waiting for reply.')
166 @click.option('-d',
167 '--destination',
168 cls=CeleryOption,
169 type=COMMA_SEPARATED_LIST,
170 help_group='Remote Control Options',
171 help='Comma separated list of destination node names.')
172 @click.option('-j',
173 '--json',
174 cls=CeleryOption,
175 is_flag=True,
176 help_group='Remote Control Options',
177 help='Use json as output format.')
178 @click.pass_context
179 @handle_preload_options
180 def control(ctx, action, timeout, destination, json):
181 """Workers remote control.
182
183 Availability: RabbitMQ (AMQP), Redis, and MongoDB transports.
184 """
185 callback = None if json else partial(_say_remote_command_reply, ctx,
186 show_reply=True)
187 args = ctx.args
188 arguments = _compile_arguments(action, args)
189 replies = ctx.obj.app.control.broadcast(action, timeout=timeout,
190 destination=destination,
191 callback=callback,
192 reply=True,
193 arguments=arguments)
194
195 if not replies:
196 raise CeleryCommandException(
197 message='No nodes replied within time constraint',
198 exit_code=EX_UNAVAILABLE
199 )
200
201 if json:
202 ctx.obj.echo(dumps(replies))
203
[end of celery/bin/control.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/celery/bin/control.py b/celery/bin/control.py
--- a/celery/bin/control.py
+++ b/celery/bin/control.py
@@ -144,6 +144,8 @@
if json:
ctx.obj.echo(dumps(replies))
+ return
+
nodecount = len(replies)
if not ctx.obj.quiet:
ctx.obj.echo('\n{} {} online.'.format(
|
{"golden_diff": "diff --git a/celery/bin/control.py b/celery/bin/control.py\n--- a/celery/bin/control.py\n+++ b/celery/bin/control.py\n@@ -144,6 +144,8 @@\n \n if json:\n ctx.obj.echo(dumps(replies))\n+ return\n+\n nodecount = len(replies)\n if not ctx.obj.quiet:\n ctx.obj.echo('\\n{} {} online.'.format(\n", "issue": "Celery inspect output JSON serialization is broken\n# Checklist\r\n- [ ] I have verified that the issue exists against the `master` branch of Celery.\r\n- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.\r\n- [x] I have read the relevant section in the\r\n [contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)\r\n on reporting bugs.\r\n- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)\r\n for similar or identical bug reports.\r\n- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)\r\n for existing proposed fixes.\r\n- [ ] I have checked the [commit log](https://github.com/celery/celery/commits/master)\r\n to find out if the bug was already fixed in the master branch.\r\n- [x] I have included all related issues and possible duplicate issues\r\n in this issue (If there are none, check this box anyway).\r\n\r\n## Mandatory Debugging Information\r\n\r\n- [ ] I have included the output of ``celery -A proj report`` in the issue.\r\n (if you are not able to do this, then at least specify the Celery\r\n version affected).\r\n- [ ] I have verified that the issue exists against the `master` branch of Celery.\r\n- [ ] I have included the contents of ``pip freeze`` in the issue.\r\n- [ ] I have included all the versions of all the external dependencies required\r\n to reproduce this bug.\r\n\r\n## Optional Debugging Information\r\n- [ ] I have tried reproducing the issue on more than one Python version\r\n and/or implementation.\r\n- [ ] I have tried reproducing the issue on more than one message broker and/or\r\n result backend.\r\n- [ ] I have tried reproducing the issue on more than one version of the message\r\n broker and/or result backend.\r\n- [ ] I have tried reproducing the issue on more than one operating system.\r\n- [ ] I have tried reproducing the issue on more than one workers pool.\r\n- [ ] I have tried reproducing the issue with autoscaling, retries,\r\n ETA/Countdown & rate limits disabled.\r\n- [ ] I have tried reproducing the issue after downgrading\r\n and/or upgrading Celery and its dependencies.\r\n\r\n## Environment & Settings\r\n**Celery version**: 5.1.2 (sun-harmonics)\r\n\r\n# Steps to Reproduce\r\n\r\nExecute `celery -A proj inspect -j active`\r\n\r\n# Expected Behavior\r\nValid JSON string is returned, like: \r\n```\r\n{\"worker-1\": [], \"worker-2\": [], \"worker-3\": []}\r\n```\r\n\r\n# Actual Behavior\r\nCommand returns valid JSON in the first line, followed by double newline and some useless summary:\r\n```\r\n{\"worker-1\": [], \"worker-2\": [], \"worker-3\": []}\r\n\r\n3 nodes online.\r\n```\r\nwhich makes the overall output an invalid JSON string.\n", "before_files": [{"content": "\"\"\"The ``celery control``, ``. inspect`` and ``. 
status`` programs.\"\"\"\nfrom functools import partial\n\nimport click\nfrom kombu.utils.json import dumps\n\nfrom celery.bin.base import (COMMA_SEPARATED_LIST, CeleryCommand,\n CeleryOption, handle_preload_options)\nfrom celery.exceptions import CeleryCommandException\nfrom celery.platforms import EX_UNAVAILABLE\nfrom celery.utils import text\nfrom celery.worker.control import Panel\n\n\ndef _say_remote_command_reply(ctx, replies, show_reply=False):\n node = next(iter(replies)) # <-- take first.\n reply = replies[node]\n node = ctx.obj.style(f'{node}: ', fg='cyan', bold=True)\n status, preply = ctx.obj.pretty(reply)\n ctx.obj.say_chat('->', f'{node}{status}',\n text.indent(preply, 4) if show_reply else '',\n show_body=show_reply)\n\n\ndef _consume_arguments(meta, method, args):\n i = 0\n try:\n for i, arg in enumerate(args):\n try:\n name, typ = meta.args[i]\n except IndexError:\n if meta.variadic:\n break\n raise click.UsageError(\n 'Command {!r} takes arguments: {}'.format(\n method, meta.signature))\n else:\n yield name, typ(arg) if typ is not None else arg\n finally:\n args[:] = args[i:]\n\n\ndef _compile_arguments(action, args):\n meta = Panel.meta[action]\n arguments = {}\n if meta.args:\n arguments.update({\n k: v for k, v in _consume_arguments(meta, action, args)\n })\n if meta.variadic:\n arguments.update({meta.variadic: args})\n return arguments\n\n\[email protected](cls=CeleryCommand)\[email protected]('-t',\n '--timeout',\n cls=CeleryOption,\n type=float,\n default=1.0,\n help_group='Remote Control Options',\n help='Timeout in seconds waiting for reply.')\[email protected]('-d',\n '--destination',\n cls=CeleryOption,\n type=COMMA_SEPARATED_LIST,\n help_group='Remote Control Options',\n help='Comma separated list of destination node names.')\[email protected]('-j',\n '--json',\n cls=CeleryOption,\n is_flag=True,\n help_group='Remote Control Options',\n help='Use json as output format.')\[email protected]_context\n@handle_preload_options\ndef status(ctx, timeout, destination, json, **kwargs):\n \"\"\"Show list of workers that are online.\"\"\"\n callback = None if json else partial(_say_remote_command_reply, ctx)\n replies = ctx.obj.app.control.inspect(timeout=timeout,\n destination=destination,\n callback=callback).ping()\n\n if not replies:\n raise CeleryCommandException(\n message='No nodes replied within time constraint',\n exit_code=EX_UNAVAILABLE\n )\n\n if json:\n ctx.obj.echo(dumps(replies))\n nodecount = len(replies)\n if not kwargs.get('quiet', False):\n ctx.obj.echo('\\n{} {} online.'.format(\n nodecount, text.pluralize(nodecount, 'node')))\n\n\[email protected](cls=CeleryCommand,\n context_settings={'allow_extra_args': True})\[email protected](\"action\", type=click.Choice([\n name for name, info in Panel.meta.items()\n if info.type == 'inspect' and info.visible\n]))\[email protected]('-t',\n '--timeout',\n cls=CeleryOption,\n type=float,\n default=1.0,\n help_group='Remote Control Options',\n help='Timeout in seconds waiting for reply.')\[email protected]('-d',\n '--destination',\n cls=CeleryOption,\n type=COMMA_SEPARATED_LIST,\n help_group='Remote Control Options',\n help='Comma separated list of destination node names.')\[email protected]('-j',\n '--json',\n cls=CeleryOption,\n is_flag=True,\n help_group='Remote Control Options',\n help='Use json as output format.')\[email protected]_context\n@handle_preload_options\ndef inspect(ctx, action, timeout, destination, json, **kwargs):\n \"\"\"Inspect the worker at runtime.\n\n Availability: RabbitMQ (AMQP) and Redis 
transports.\n \"\"\"\n callback = None if json else partial(_say_remote_command_reply, ctx,\n show_reply=True)\n arguments = _compile_arguments(action, ctx.args)\n inspect = ctx.obj.app.control.inspect(timeout=timeout,\n destination=destination,\n callback=callback)\n replies = inspect._request(action,\n **arguments)\n\n if not replies:\n raise CeleryCommandException(\n message='No nodes replied within time constraint',\n exit_code=EX_UNAVAILABLE\n )\n\n if json:\n ctx.obj.echo(dumps(replies))\n nodecount = len(replies)\n if not ctx.obj.quiet:\n ctx.obj.echo('\\n{} {} online.'.format(\n nodecount, text.pluralize(nodecount, 'node')))\n\n\[email protected](cls=CeleryCommand,\n context_settings={'allow_extra_args': True})\[email protected](\"action\", type=click.Choice([\n name for name, info in Panel.meta.items()\n if info.type == 'control' and info.visible\n]))\[email protected]('-t',\n '--timeout',\n cls=CeleryOption,\n type=float,\n default=1.0,\n help_group='Remote Control Options',\n help='Timeout in seconds waiting for reply.')\[email protected]('-d',\n '--destination',\n cls=CeleryOption,\n type=COMMA_SEPARATED_LIST,\n help_group='Remote Control Options',\n help='Comma separated list of destination node names.')\[email protected]('-j',\n '--json',\n cls=CeleryOption,\n is_flag=True,\n help_group='Remote Control Options',\n help='Use json as output format.')\[email protected]_context\n@handle_preload_options\ndef control(ctx, action, timeout, destination, json):\n \"\"\"Workers remote control.\n\n Availability: RabbitMQ (AMQP), Redis, and MongoDB transports.\n \"\"\"\n callback = None if json else partial(_say_remote_command_reply, ctx,\n show_reply=True)\n args = ctx.args\n arguments = _compile_arguments(action, args)\n replies = ctx.obj.app.control.broadcast(action, timeout=timeout,\n destination=destination,\n callback=callback,\n reply=True,\n arguments=arguments)\n\n if not replies:\n raise CeleryCommandException(\n message='No nodes replied within time constraint',\n exit_code=EX_UNAVAILABLE\n )\n\n if json:\n ctx.obj.echo(dumps(replies))\n", "path": "celery/bin/control.py"}]}
| 3,199 | 97 |
gh_patches_debug_22529
|
rasdani/github-patches
|
git_diff
|
lutris__lutris-1227
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Vulkan check will not work on certain distributions
Note that this probably won't work on certain distributions, where this file, for instance, is actually called `/usr/lib/x86_64-linux-gnu/libvulkan.so.1` or similar. Confirmed not working on Linux Mint 19.
_Originally posted by @Vexatos in https://github.com/lutris/lutris/pull/1186_
</issue>
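A minimal sketch of one way to handle the versioned loader names mentioned in the issue (assumptions: glob-based matching is acceptable, and the same four directories as the current implementation are the ones worth probing):

```python
import glob

def has_vulkan_loader(directories=(
    "/usr/lib", "/usr/lib32",
    "/usr/lib/x86_64-linux-gnu", "/usr/lib32/i386-linux-gnu",
)):
    # Matches libvulkan.so as well as versioned names such as libvulkan.so.1
    return any(glob.glob("{}/libvulkan.so*".format(d)) for d in directories)
```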
<code>
[start of lutris/util/vulkan.py]
1 """Vulkan helper module"""
2 import os
3 from enum import Enum
4
5 class vulkan_available(Enum):
6 NONE = 0
7 THIRTY_TWO = 1
8 SIXTY_FOUR = 2
9 ALL = 3
10
11 def vulkan_check():
12 vulkan_lib = os.path.isfile("/usr/lib/libvulkan.so")
13 vulkan_lib32 = os.path.isfile("/usr/lib32/libvulkan.so")
14 vulkan_lib_multi = os.path.isfile("/usr/lib/x86_64-linux-gnu/libvulkan.so")
15 vulkan_lib32_multi = os.path.isfile("/usr/lib32/i386-linux-gnu/libvulkan.so")
16 has_32_bit = vulkan_lib32 or vulkan_lib32_multi
17 has_64_bit = vulkan_lib or vulkan_lib_multi
18
19 if not (has_64_bit or has_32_bit):
20 return vulkan_available.NONE
21 if has_64_bit and not has_32_bit:
22 return vulkan_available.SIXTY_FOUR
23 if not has_64_bit and has_32_bit:
24 return vulkan_available.THIRTY_TWO
25 return vulkan_available.ALL
26
[end of lutris/util/vulkan.py]
</code>
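For orientation, a small usage sketch of the module shown above (assumption: it would be called from Lutris' system checks before launching a Vulkan-based runner; this is not code from the repository):

```python
from lutris.util.vulkan import vulkan_available, vulkan_check

support = vulkan_check()
if support in (vulkan_available.NONE, vulkan_available.THIRTY_TWO):
    print("64-bit Vulkan loader not found")
```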
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lutris/util/vulkan.py b/lutris/util/vulkan.py
--- a/lutris/util/vulkan.py
+++ b/lutris/util/vulkan.py
@@ -1,5 +1,6 @@
"""Vulkan helper module"""
import os
+import re
from enum import Enum
class vulkan_available(Enum):
@@ -8,11 +9,20 @@
SIXTY_FOUR = 2
ALL = 3
+def search_for_file(directory):
+ if os.path.isdir(directory):
+ pattern = re.compile(r'^libvulkan\.so')
+ files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
+ files = [os.path.join(directory, f) for f in files if pattern.search(f)]
+ if files:
+ return True
+ return False
+
def vulkan_check():
- vulkan_lib = os.path.isfile("/usr/lib/libvulkan.so")
- vulkan_lib32 = os.path.isfile("/usr/lib32/libvulkan.so")
- vulkan_lib_multi = os.path.isfile("/usr/lib/x86_64-linux-gnu/libvulkan.so")
- vulkan_lib32_multi = os.path.isfile("/usr/lib32/i386-linux-gnu/libvulkan.so")
+ vulkan_lib = search_for_file("/usr/lib")
+ vulkan_lib32 = search_for_file("/usr/lib32")
+ vulkan_lib_multi = search_for_file("/usr/lib/x86_64-linux-gnu")
+ vulkan_lib32_multi = search_for_file("/usr/lib32/i386-linux-gnu")
has_32_bit = vulkan_lib32 or vulkan_lib32_multi
has_64_bit = vulkan_lib or vulkan_lib_multi
|
{"golden_diff": "diff --git a/lutris/util/vulkan.py b/lutris/util/vulkan.py\n--- a/lutris/util/vulkan.py\n+++ b/lutris/util/vulkan.py\n@@ -1,5 +1,6 @@\n \"\"\"Vulkan helper module\"\"\"\n import os\n+import re\n from enum import Enum\n \n class vulkan_available(Enum):\n@@ -8,11 +9,20 @@\n SIXTY_FOUR = 2\n ALL = 3\n \n+def search_for_file(directory):\n+ if os.path.isdir(directory):\n+ pattern = re.compile(r'^libvulkan\\.so')\n+ files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]\n+ files = [os.path.join(directory, f) for f in files if pattern.search(f)]\n+ if files:\n+ return True\n+ return False\n+\n def vulkan_check():\n- vulkan_lib = os.path.isfile(\"/usr/lib/libvulkan.so\")\n- vulkan_lib32 = os.path.isfile(\"/usr/lib32/libvulkan.so\")\n- vulkan_lib_multi = os.path.isfile(\"/usr/lib/x86_64-linux-gnu/libvulkan.so\")\n- vulkan_lib32_multi = os.path.isfile(\"/usr/lib32/i386-linux-gnu/libvulkan.so\")\n+ vulkan_lib = search_for_file(\"/usr/lib\")\n+ vulkan_lib32 = search_for_file(\"/usr/lib32\")\n+ vulkan_lib_multi = search_for_file(\"/usr/lib/x86_64-linux-gnu\")\n+ vulkan_lib32_multi = search_for_file(\"/usr/lib32/i386-linux-gnu\")\n has_32_bit = vulkan_lib32 or vulkan_lib32_multi\n has_64_bit = vulkan_lib or vulkan_lib_multi\n", "issue": "Vulkan check will not work on certain distributions\nNote that this probably won't work on certain distributions, where this file, for instance, is actually called `/usr/lib/x86_64-linux-gnu/libvulkan.so.1` or similar. Confirmed not working on Linux Mint 19.\r\n\r\n_Originally posted by @Vexatos in https://github.com/lutris/lutris/pull/1186_\n", "before_files": [{"content": "\"\"\"Vulkan helper module\"\"\"\nimport os\nfrom enum import Enum\n\nclass vulkan_available(Enum):\n NONE = 0\n THIRTY_TWO = 1\n SIXTY_FOUR = 2\n ALL = 3\n\ndef vulkan_check():\n vulkan_lib = os.path.isfile(\"/usr/lib/libvulkan.so\")\n vulkan_lib32 = os.path.isfile(\"/usr/lib32/libvulkan.so\")\n vulkan_lib_multi = os.path.isfile(\"/usr/lib/x86_64-linux-gnu/libvulkan.so\")\n vulkan_lib32_multi = os.path.isfile(\"/usr/lib32/i386-linux-gnu/libvulkan.so\")\n has_32_bit = vulkan_lib32 or vulkan_lib32_multi\n has_64_bit = vulkan_lib or vulkan_lib_multi\n\n if not (has_64_bit or has_32_bit):\n return vulkan_available.NONE\n if has_64_bit and not has_32_bit:\n return vulkan_available.SIXTY_FOUR\n if not has_64_bit and has_32_bit:\n return vulkan_available.THIRTY_TWO\n return vulkan_available.ALL\n", "path": "lutris/util/vulkan.py"}]}
| 933 | 401 |
gh_patches_debug_16786
|
rasdani/github-patches
|
git_diff
|
blaze__blaze-431
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Individual columns should be able to repr if not passed in CSV
This issue title is possibly the worst ever, so here's an example:
``` python
import tempfile
import pandas as pd
from blaze import *
```
This works:
``` python
with tempfile.NamedTemporaryFile(delete=False) as f:
df = pd.DataFrame(np.random.randn(10, 2))
df.to_csv(f.name, index=False, header=False)
csv = CSV(f.name, columns=list('ab')) # passing columns to CSV
t = Table(csv)
assert t.a.isidentical(t['a'])
```
But this:
``` python
with tempfile.NamedTemporaryFile(delete=False) as f:
df = pd.DataFrame(np.random.randn(10, 2))
df.to_csv(f.name, index=False, header=False)
csv = CSV(f.name)
t = Table(csv, columns=list('ab')) # passing columns to Table
assert t.a.isidentical(t['a'])
```
yields a `t` where `t.a` throws an error in the interpreter when I try to `repr` it.
The difference is that the first propagates the columns to the `Table` (or maybe it doesn't propagate, but it uses the correct names), while in the second the columns are still represented by their "anonymous" names `_0`, `_1`, etc.
</issue>
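A hedged note on the failure mode: when `columns=` is given to `Table` but not to the data source, the `Table` schema carries the new names while the CSV's own schema still uses the anonymous `_0`, `_1` fields, so a renamed single column cannot be resolved at compute time. Until that is reconciled, the working example from the issue sidesteps it by naming the columns on the data source itself:

```python
# Workaround sketch using the same objects as the issue: give the names to
# the CSV so both schemas agree, then let Table inherit them.
csv = CSV(f.name, columns=list('ab'))
t = Table(csv)
t.a  # repr now works, and t.a.isidentical(t['a']) holds
```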
<code>
[start of blaze/api/table.py]
1
2 from datashape import discover, Tuple, Record, dshape, Fixed
3 import itertools
4
5 from ..expr.core import Expr
6 from ..expr.table import TableSymbol, TableExpr
7 from ..data.python import Python
8 from ..dispatch import dispatch
9 from ..data.core import DataDescriptor, discover
10 from ..data.pandas import into, DataFrame
11 from .into import into
12
13 names = ('_%d' % i for i in itertools.count(1))
14
15 class Table(TableSymbol):
16 """ Interactive Table
17
18 Parameters
19 ----------
20
21 data: DataDescriptor, tuple, DataFrame, RDD, SQL Table, ...
22 Anything that ``compute`` knows how to work with
23
24 Optional
25 --------
26
27 name: string
28 A name for the table
29 columns: iterable of strings
30 Column names, will be inferred from datasource if possible
31 schema: string or DataShape
32 Explitit Record containing datatypes and column names
33 """
34 __slots__ = 'data', 'schema', '_name', 'iscolumn'
35
36 def __init__(self, data, name=None, columns=None, schema=None,
37 iscolumn=False):
38 if not schema:
39 schema = discover(data).subshape[0]
40 types = None
41 if isinstance(schema[0], Tuple):
42 columns = columns or list(range(len(schema[0].dshapes)))
43 types = schema[0].dshapes
44 if isinstance(schema[0], Record):
45 columns = columns or schema[0].names
46 types = schema[0].types
47 if isinstance(schema[0], Fixed):
48 types = (schema[1],) * int(schema[0])
49 if not columns:
50 raise TypeError("Could not infer column names from data. "
51 "Please specify column names with `column=` "
52 "keyword")
53 if not types:
54 raise TypeError("Could not infer data types from data. "
55 "Please specify schema with `schema=` keyword")
56
57 schema = dshape(Record(list(zip(columns, types))))
58 self.schema = dshape(schema)
59
60 self.data = data
61 self._name = name or next(names)
62 self.iscolumn = iscolumn
63
64 def resources(self):
65 return {self: self.data}
66
67 @property
68 def args(self):
69 return (id(self.data), self.schema, self._name, self.iscolumn)
70
71
72 @dispatch(Table, dict)
73 def _subs(o, d):
74 return o
75
76
77 @dispatch(Expr)
78 def compute(expr):
79 resources = expr.resources()
80 if not resources:
81 raise ValueError("No data resources found")
82 else:
83 return compute(expr, resources)
84
85
86 def table_repr(expr, n=10):
87 if not expr.resources():
88 return str(expr)
89 if isinstance(expr, TableExpr):
90 head = expr.head(n + 1)
91 result = compute(head)
92
93 if expr.columns:
94 df = into(DataFrame(columns=expr.columns), result)
95 else:
96 df = into(DataFrame, result)
97 s = repr(df)
98 if len(df) > 10:
99 df = df[:10]
100 s = '\n'.join(s.split('\n')[:-1]) + '\n...'
101 return s
102
103 else:
104 return repr(compute(expr))
105
106
107 @dispatch((type, object), TableExpr)
108 def into(a, b):
109 return into(a, compute(b))
110
111
112 @dispatch(DataFrame, TableExpr)
113 def into(a, b):
114 columns = b.columns
115 return into(DataFrame(columns=columns), compute(b))
116
117
118 Expr.__repr__ = table_repr
119
[end of blaze/api/table.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/blaze/api/table.py b/blaze/api/table.py
--- a/blaze/api/table.py
+++ b/blaze/api/table.py
@@ -4,9 +4,7 @@
from ..expr.core import Expr
from ..expr.table import TableSymbol, TableExpr
-from ..data.python import Python
from ..dispatch import dispatch
-from ..data.core import DataDescriptor, discover
from ..data.pandas import into, DataFrame
from .into import into
@@ -58,6 +56,12 @@
self.schema = dshape(schema)
self.data = data
+
+ if hasattr(data, 'schema') and self.schema != data.schema:
+ raise TypeError('%s schema %s does not match %s schema %s' %
+ (type(data).__name__, data.schema,
+ type(self).__name__, self.schema))
+
self._name = name or next(names)
self.iscolumn = iscolumn
|
{"golden_diff": "diff --git a/blaze/api/table.py b/blaze/api/table.py\n--- a/blaze/api/table.py\n+++ b/blaze/api/table.py\n@@ -4,9 +4,7 @@\n \n from ..expr.core import Expr\n from ..expr.table import TableSymbol, TableExpr\n-from ..data.python import Python\n from ..dispatch import dispatch\n-from ..data.core import DataDescriptor, discover\n from ..data.pandas import into, DataFrame\n from .into import into\n \n@@ -58,6 +56,12 @@\n self.schema = dshape(schema)\n \n self.data = data\n+\n+ if hasattr(data, 'schema') and self.schema != data.schema:\n+ raise TypeError('%s schema %s does not match %s schema %s' %\n+ (type(data).__name__, data.schema,\n+ type(self).__name__, self.schema))\n+\n self._name = name or next(names)\n self.iscolumn = iscolumn\n", "issue": "Individual columns should be able to repr if not passed in CSV\nThis issue title is possibly the worst ever, so here's an example:\n\n``` python\nimport tempfile\nimport pandas as pd\nfrom blaze import *\n```\n\nThis works:\n\n``` python\nwith tempfile.NamedTemporaryFile(delete=False) as f:\n df = pd.DataFrame(np.random.randn(10, 2))\n df.to_csv(f.name, index=False, header=False)\n csv = CSV(f.name, columns=list('ab')) # passing columns to CSV\n t = Table(csv)\n assert t.a.isidentical(t['a'])\n```\n\nBut this:\n\n``` python\nwith tempfile.NamedTemporaryFile(delete=False) as f:\n df = pd.DataFrame(np.random.randn(10, 2))\n df.to_csv(f.name, index=False, header=False)\n csv = CSV(f.name)\n t = Table(csv, columns=list('ab')) # passing columns to Table\n assert t.a.isidentical(t['a'])\n```\n\nyield a `t` where `t.a` throws an error in the interpreter when I try to `repr` it.\n\nThe difference is that the first propagates the columns to the `Table` (or maybe it doesn't propagate, but it uses the correct names), while in the second the columns are still represented by their \"anonymous\" names `_0`, `_1`, etc.\n\n", "before_files": [{"content": "\nfrom datashape import discover, Tuple, Record, dshape, Fixed\nimport itertools\n\nfrom ..expr.core import Expr\nfrom ..expr.table import TableSymbol, TableExpr\nfrom ..data.python import Python\nfrom ..dispatch import dispatch\nfrom ..data.core import DataDescriptor, discover\nfrom ..data.pandas import into, DataFrame\nfrom .into import into\n\nnames = ('_%d' % i for i in itertools.count(1))\n\nclass Table(TableSymbol):\n \"\"\" Interactive Table\n\n Parameters\n ----------\n\n data: DataDescriptor, tuple, DataFrame, RDD, SQL Table, ...\n Anything that ``compute`` knows how to work with\n\n Optional\n --------\n\n name: string\n A name for the table\n columns: iterable of strings\n Column names, will be inferred from datasource if possible\n schema: string or DataShape\n Explitit Record containing datatypes and column names\n \"\"\"\n __slots__ = 'data', 'schema', '_name', 'iscolumn'\n\n def __init__(self, data, name=None, columns=None, schema=None,\n iscolumn=False):\n if not schema:\n schema = discover(data).subshape[0]\n types = None\n if isinstance(schema[0], Tuple):\n columns = columns or list(range(len(schema[0].dshapes)))\n types = schema[0].dshapes\n if isinstance(schema[0], Record):\n columns = columns or schema[0].names\n types = schema[0].types\n if isinstance(schema[0], Fixed):\n types = (schema[1],) * int(schema[0])\n if not columns:\n raise TypeError(\"Could not infer column names from data. \"\n \"Please specify column names with `column=` \"\n \"keyword\")\n if not types:\n raise TypeError(\"Could not infer data types from data. 
\"\n \"Please specify schema with `schema=` keyword\")\n\n schema = dshape(Record(list(zip(columns, types))))\n self.schema = dshape(schema)\n\n self.data = data\n self._name = name or next(names)\n self.iscolumn = iscolumn\n\n def resources(self):\n return {self: self.data}\n\n @property\n def args(self):\n return (id(self.data), self.schema, self._name, self.iscolumn)\n\n\n@dispatch(Table, dict)\ndef _subs(o, d):\n return o\n\n\n@dispatch(Expr)\ndef compute(expr):\n resources = expr.resources()\n if not resources:\n raise ValueError(\"No data resources found\")\n else:\n return compute(expr, resources)\n\n\ndef table_repr(expr, n=10):\n if not expr.resources():\n return str(expr)\n if isinstance(expr, TableExpr):\n head = expr.head(n + 1)\n result = compute(head)\n\n if expr.columns:\n df = into(DataFrame(columns=expr.columns), result)\n else:\n df = into(DataFrame, result)\n s = repr(df)\n if len(df) > 10:\n df = df[:10]\n s = '\\n'.join(s.split('\\n')[:-1]) + '\\n...'\n return s\n\n else:\n return repr(compute(expr))\n\n\n@dispatch((type, object), TableExpr)\ndef into(a, b):\n return into(a, compute(b))\n\n\n@dispatch(DataFrame, TableExpr)\ndef into(a, b):\n columns = b.columns\n return into(DataFrame(columns=columns), compute(b))\n\n\nExpr.__repr__ = table_repr\n", "path": "blaze/api/table.py"}]}
| 1,839 | 204 |
gh_patches_debug_31458
|
rasdani/github-patches
|
git_diff
|
geopandas__geopandas-307
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
overlay gives confusing error when passed `GeoSeries`
It should either support this or give an informative error. Right now it gives:
```
In [6]: country_cores = overlay(countries, capital_buffer, how='intersection')
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-a69b51de5bcf> in <module>()
----> 1 country_cores = overlay(countries, capital_buffer, how='intersection')
/Users/Nick/github/geopandas/geopandas/tools/overlay.py in overlay(df1, df2, how, use_sindex)
85 # Collect the interior and exterior rings
86 rings1 = _extract_rings(df1)
---> 87 rings2 = _extract_rings(df2)
88 mls1 = MultiLineString(rings1)
89 mls2 = MultiLineString(rings2)
/Users/Nick/github/geopandas/geopandas/tools/overlay.py in _extract_rings(df)
31 poly_msg = "overlay only takes GeoDataFrames with (multi)polygon geometries"
32 rings = []
---> 33 for i, feat in df.iterrows():
34 geom = feat.geometry
35
/Users/Nick/github/pandas/pandas/core/generic.py in __getattr__(self, name)
2665 if name in self._info_axis:
2666 return self[name]
-> 2667 return object.__getattribute__(self, name)
2668
2669 def __setattr__(self, name, value):
AttributeError: 'GeoSeries' object has no attribute 'iterrows'
```
</issue>
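A hedged workaround sketch for the call in the traceback (assumption: `capital_buffer` is a `GeoSeries`): `overlay` iterates over rows of two GeoDataFrames, so promote the series to a one-column frame before passing it.

```python
from geopandas import GeoDataFrame

capital_buffer_df = GeoDataFrame(geometry=capital_buffer)
country_cores = overlay(countries, capital_buffer_df, how='intersection')
```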
<code>
[start of geopandas/tools/overlay.py]
1 from shapely.ops import unary_union, polygonize
2 from shapely.geometry import MultiLineString
3 import pandas as pd
4 from geopandas import GeoDataFrame, GeoSeries
5
6
7 def _uniquify(columns):
8 ucols = []
9 for col in columns:
10 inc = 1
11 newcol = col
12 while newcol in ucols:
13 inc += 1
14 newcol = "{0}_{1}".format(col, inc)
15 ucols.append(newcol)
16 return ucols
17
18
19 def _extract_rings(df):
20 """Collects all inner and outer linear rings from a GeoDataFrame
21 with (multi)Polygon geometeries
22
23 Parameters
24 ----------
25 df: GeoDataFrame with MultiPolygon or Polygon geometry column
26
27 Returns
28 -------
29 rings: list of LinearRings
30 """
31 poly_msg = "overlay only takes GeoDataFrames with (multi)polygon geometries"
32 rings = []
33 for i, feat in df.iterrows():
34 geom = feat.geometry
35
36 if geom.type not in ['Polygon', 'MultiPolygon']:
37 raise TypeError(poly_msg)
38
39 if hasattr(geom, 'geoms'):
40 for poly in geom.geoms: # if it's a multipolygon
41 if not poly.is_valid:
42 # geom from layer is not valid attempting fix by buffer 0"
43 poly = poly.buffer(0)
44 rings.append(poly.exterior)
45 rings.extend(poly.interiors)
46 else:
47 if not geom.is_valid:
48 # geom from layer is not valid attempting fix by buffer 0"
49 geom = geom.buffer(0)
50 rings.append(geom.exterior)
51 rings.extend(geom.interiors)
52
53 return rings
54
55 def overlay(df1, df2, how, use_sindex=True):
56 """Perform spatial overlay between two polygons
57 Currently only supports data GeoDataFrames with polygons
58
59 Implements several methods (see `allowed_hows` list) that are
60 all effectively subsets of the union.
61
62 Parameters
63 ----------
64 df1 : GeoDataFrame with MultiPolygon or Polygon geometry column
65 df2 : GeoDataFrame with MultiPolygon or Polygon geometry column
66 how : method of spatial overlay
67 use_sindex : Boolean; Use the spatial index to speed up operation. Default is True.
68
69 Returns
70 -------
71 df : GeoDataFrame with new set of polygons and attributes resulting from the overlay
72 """
73 allowed_hows = [
74 'intersection',
75 'union',
76 'identity',
77 'symmetric_difference',
78 'difference', # aka erase
79 ]
80
81 if how not in allowed_hows:
82 raise ValueError("`how` was \"%s\" but is expected to be in %s" % \
83 (how, allowed_hows))
84
85 # Collect the interior and exterior rings
86 rings1 = _extract_rings(df1)
87 rings2 = _extract_rings(df2)
88 mls1 = MultiLineString(rings1)
89 mls2 = MultiLineString(rings2)
90
91 # Union and polygonize
92 try:
93 # calculating union (try the fast unary_union)
94 mm = unary_union([mls1, mls2])
95 except:
96 # unary_union FAILED
97 # see https://github.com/Toblerity/Shapely/issues/47#issuecomment-18506767
98 # calculating union again (using the slow a.union(b))
99 mm = mls1.union(mls2)
100 newpolys = polygonize(mm)
101
102 # determine spatial relationship
103 collection = []
104 for fid, newpoly in enumerate(newpolys):
105 cent = newpoly.representative_point()
106
107 # Test intersection with original polys
108 # FIXME there should be a higher-level abstraction to search by bounds
109 # and fall back in the case of no index?
110 if use_sindex and df1.sindex is not None:
111 candidates1 = [x.object for x in
112 df1.sindex.intersection(newpoly.bounds, objects=True)]
113 else:
114 candidates1 = [i for i, x in df1.iterrows()]
115
116 if use_sindex and df2.sindex is not None:
117 candidates2 = [x.object for x in
118 df2.sindex.intersection(newpoly.bounds, objects=True)]
119 else:
120 candidates2 = [i for i, x in df2.iterrows()]
121
122 df1_hit = False
123 df2_hit = False
124 prop1 = None
125 prop2 = None
126 for cand_id in candidates1:
127 cand = df1.ix[cand_id]
128 if cent.intersects(cand.geometry):
129 df1_hit = True
130 prop1 = cand
131 break # Take the first hit
132 for cand_id in candidates2:
133 cand = df2.ix[cand_id]
134 if cent.intersects(cand.geometry):
135 df2_hit = True
136 prop2 = cand
137 break # Take the first hit
138
139 # determine spatial relationship based on type of overlay
140 hit = False
141 if how == "intersection" and (df1_hit and df2_hit):
142 hit = True
143 elif how == "union" and (df1_hit or df2_hit):
144 hit = True
145 elif how == "identity" and df1_hit:
146 hit = True
147 elif how == "symmetric_difference" and not (df1_hit and df2_hit):
148 hit = True
149 elif how == "difference" and (df1_hit and not df2_hit):
150 hit = True
151
152 if not hit:
153 continue
154
155 # gather properties
156 if prop1 is None:
157 prop1 = pd.Series(dict.fromkeys(df1.columns, None))
158 if prop2 is None:
159 prop2 = pd.Series(dict.fromkeys(df2.columns, None))
160
161 # Concat but don't retain the original geometries
162 out_series = pd.concat([prop1.drop(df1._geometry_column_name),
163 prop2.drop(df2._geometry_column_name)])
164
165 out_series.index = _uniquify(out_series.index)
166
167 # Create a geoseries and add it to the collection
168 out_series['geometry'] = newpoly
169 collection.append(out_series)
170
171 # Return geodataframe with new indicies
172 return GeoDataFrame(collection, index=range(len(collection)))
173
[end of geopandas/tools/overlay.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/geopandas/tools/overlay.py b/geopandas/tools/overlay.py
--- a/geopandas/tools/overlay.py
+++ b/geopandas/tools/overlay.py
@@ -30,8 +30,10 @@
"""
poly_msg = "overlay only takes GeoDataFrames with (multi)polygon geometries"
rings = []
+ geometry_column = df.geometry.name
+
for i, feat in df.iterrows():
- geom = feat.geometry
+ geom = feat[geometry_column]
if geom.type not in ['Polygon', 'MultiPolygon']:
raise TypeError(poly_msg)
@@ -82,6 +84,9 @@
raise ValueError("`how` was \"%s\" but is expected to be in %s" % \
(how, allowed_hows))
+ if isinstance(df1, GeoSeries) or isinstance(df2, GeoSeries):
+ raise NotImplementedError("overlay currently only implemented for GeoDataFrames")
+
# Collect the interior and exterior rings
rings1 = _extract_rings(df1)
rings2 = _extract_rings(df2)
@@ -125,13 +130,13 @@
prop2 = None
for cand_id in candidates1:
cand = df1.ix[cand_id]
- if cent.intersects(cand.geometry):
+ if cent.intersects(cand[df1.geometry.name]):
df1_hit = True
prop1 = cand
break # Take the first hit
for cand_id in candidates2:
cand = df2.ix[cand_id]
- if cent.intersects(cand.geometry):
+ if cent.intersects(cand[df2.geometry.name]):
df2_hit = True
prop2 = cand
break # Take the first hit
|
{"golden_diff": "diff --git a/geopandas/tools/overlay.py b/geopandas/tools/overlay.py\n--- a/geopandas/tools/overlay.py\n+++ b/geopandas/tools/overlay.py\n@@ -30,8 +30,10 @@\n \"\"\"\n poly_msg = \"overlay only takes GeoDataFrames with (multi)polygon geometries\"\n rings = []\n+ geometry_column = df.geometry.name\n+\n for i, feat in df.iterrows():\n- geom = feat.geometry\n+ geom = feat[geometry_column]\n \n if geom.type not in ['Polygon', 'MultiPolygon']:\n raise TypeError(poly_msg)\n@@ -82,6 +84,9 @@\n raise ValueError(\"`how` was \\\"%s\\\" but is expected to be in %s\" % \\\n (how, allowed_hows))\n \n+ if isinstance(df1, GeoSeries) or isinstance(df2, GeoSeries):\n+ raise NotImplementedError(\"overlay currently only implemented for GeoDataFrames\")\n+\n # Collect the interior and exterior rings\n rings1 = _extract_rings(df1)\n rings2 = _extract_rings(df2)\n@@ -125,13 +130,13 @@\n prop2 = None\n for cand_id in candidates1:\n cand = df1.ix[cand_id]\n- if cent.intersects(cand.geometry):\n+ if cent.intersects(cand[df1.geometry.name]):\n df1_hit = True\n prop1 = cand\n break # Take the first hit\n for cand_id in candidates2:\n cand = df2.ix[cand_id]\n- if cent.intersects(cand.geometry):\n+ if cent.intersects(cand[df2.geometry.name]):\n df2_hit = True\n prop2 = cand\n break # Take the first hit\n", "issue": "overlay gives confusing error when passed `GeoSeries`\nShould either support or give informative error. Right now gives:\n\n```\nIn [6]: country_cores = overlay(countries, capital_buffer, how='intersection')\n---------------------------------------------------------------------------\nAttributeError Traceback (most recent call last)\n<ipython-input-6-a69b51de5bcf> in <module>()\n----> 1 country_cores = overlay(countries, capital_buffer, how='intersection')\n\n/Users/Nick/github/geopandas/geopandas/tools/overlay.py in overlay(df1, df2, how, use_sindex)\n 85 # Collect the interior and exterior rings\n 86 rings1 = _extract_rings(df1)\n---> 87 rings2 = _extract_rings(df2)\n 88 mls1 = MultiLineString(rings1)\n 89 mls2 = MultiLineString(rings2)\n\n/Users/Nick/github/geopandas/geopandas/tools/overlay.py in _extract_rings(df)\n 31 poly_msg = \"overlay only takes GeoDataFrames with (multi)polygon geometries\"\n 32 rings = []\n---> 33 for i, feat in df.iterrows():\n 34 geom = feat.geometry\n 35 \n\n/Users/Nick/github/pandas/pandas/core/generic.py in __getattr__(self, name)\n 2665 if name in self._info_axis:\n 2666 return self[name]\n-> 2667 return object.__getattribute__(self, name)\n 2668 \n 2669 def __setattr__(self, name, value):\n\nAttributeError: 'GeoSeries' object has no attribute 'iterrows'\n```\n\n", "before_files": [{"content": "from shapely.ops import unary_union, polygonize\nfrom shapely.geometry import MultiLineString\nimport pandas as pd\nfrom geopandas import GeoDataFrame, GeoSeries\n\n\ndef _uniquify(columns):\n ucols = []\n for col in columns:\n inc = 1\n newcol = col\n while newcol in ucols:\n inc += 1\n newcol = \"{0}_{1}\".format(col, inc)\n ucols.append(newcol)\n return ucols\n\n\ndef _extract_rings(df):\n \"\"\"Collects all inner and outer linear rings from a GeoDataFrame \n with (multi)Polygon geometeries\n\n Parameters\n ----------\n df: GeoDataFrame with MultiPolygon or Polygon geometry column\n\n Returns\n -------\n rings: list of LinearRings\n \"\"\"\n poly_msg = \"overlay only takes GeoDataFrames with (multi)polygon geometries\"\n rings = []\n for i, feat in df.iterrows():\n geom = feat.geometry\n\n if geom.type not in ['Polygon', 'MultiPolygon']:\n raise 
TypeError(poly_msg)\n\n if hasattr(geom, 'geoms'):\n for poly in geom.geoms: # if it's a multipolygon\n if not poly.is_valid:\n # geom from layer is not valid attempting fix by buffer 0\"\n poly = poly.buffer(0)\n rings.append(poly.exterior)\n rings.extend(poly.interiors)\n else:\n if not geom.is_valid:\n # geom from layer is not valid attempting fix by buffer 0\"\n geom = geom.buffer(0)\n rings.append(geom.exterior)\n rings.extend(geom.interiors)\n\n return rings\n\ndef overlay(df1, df2, how, use_sindex=True):\n \"\"\"Perform spatial overlay between two polygons\n Currently only supports data GeoDataFrames with polygons\n\n Implements several methods (see `allowed_hows` list) that are\n all effectively subsets of the union.\n\n Parameters\n ----------\n df1 : GeoDataFrame with MultiPolygon or Polygon geometry column\n df2 : GeoDataFrame with MultiPolygon or Polygon geometry column\n how : method of spatial overlay\n use_sindex : Boolean; Use the spatial index to speed up operation. Default is True.\n\n Returns\n -------\n df : GeoDataFrame with new set of polygons and attributes resulting from the overlay\n \"\"\"\n allowed_hows = [\n 'intersection',\n 'union',\n 'identity',\n 'symmetric_difference',\n 'difference', # aka erase\n ]\n\n if how not in allowed_hows:\n raise ValueError(\"`how` was \\\"%s\\\" but is expected to be in %s\" % \\\n (how, allowed_hows))\n\n # Collect the interior and exterior rings\n rings1 = _extract_rings(df1)\n rings2 = _extract_rings(df2)\n mls1 = MultiLineString(rings1)\n mls2 = MultiLineString(rings2)\n\n # Union and polygonize\n try:\n # calculating union (try the fast unary_union)\n mm = unary_union([mls1, mls2])\n except:\n # unary_union FAILED\n # see https://github.com/Toblerity/Shapely/issues/47#issuecomment-18506767\n # calculating union again (using the slow a.union(b))\n mm = mls1.union(mls2)\n newpolys = polygonize(mm)\n\n # determine spatial relationship\n collection = []\n for fid, newpoly in enumerate(newpolys):\n cent = newpoly.representative_point()\n\n # Test intersection with original polys\n # FIXME there should be a higher-level abstraction to search by bounds\n # and fall back in the case of no index?\n if use_sindex and df1.sindex is not None:\n candidates1 = [x.object for x in\n df1.sindex.intersection(newpoly.bounds, objects=True)]\n else:\n candidates1 = [i for i, x in df1.iterrows()]\n\n if use_sindex and df2.sindex is not None:\n candidates2 = [x.object for x in\n df2.sindex.intersection(newpoly.bounds, objects=True)]\n else:\n candidates2 = [i for i, x in df2.iterrows()]\n\n df1_hit = False\n df2_hit = False\n prop1 = None\n prop2 = None\n for cand_id in candidates1:\n cand = df1.ix[cand_id]\n if cent.intersects(cand.geometry):\n df1_hit = True\n prop1 = cand\n break # Take the first hit\n for cand_id in candidates2:\n cand = df2.ix[cand_id]\n if cent.intersects(cand.geometry):\n df2_hit = True\n prop2 = cand\n break # Take the first hit\n\n # determine spatial relationship based on type of overlay\n hit = False\n if how == \"intersection\" and (df1_hit and df2_hit):\n hit = True\n elif how == \"union\" and (df1_hit or df2_hit):\n hit = True\n elif how == \"identity\" and df1_hit:\n hit = True\n elif how == \"symmetric_difference\" and not (df1_hit and df2_hit):\n hit = True\n elif how == \"difference\" and (df1_hit and not df2_hit):\n hit = True\n\n if not hit:\n continue\n\n # gather properties\n if prop1 is None:\n prop1 = pd.Series(dict.fromkeys(df1.columns, None))\n if prop2 is None:\n prop2 = 
pd.Series(dict.fromkeys(df2.columns, None))\n\n # Concat but don't retain the original geometries\n out_series = pd.concat([prop1.drop(df1._geometry_column_name),\n prop2.drop(df2._geometry_column_name)])\n\n out_series.index = _uniquify(out_series.index)\n\n # Create a geoseries and add it to the collection\n out_series['geometry'] = newpoly\n collection.append(out_series)\n\n # Return geodataframe with new indicies\n return GeoDataFrame(collection, index=range(len(collection)))\n", "path": "geopandas/tools/overlay.py"}]}
| 2,690 | 388 |
gh_patches_debug_28013
|
rasdani/github-patches
|
git_diff
|
CTFd__CTFd-2344
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Challenge Preview Improvements
Challenge Preview should probably render in the context of a full page, primarily because that makes it easier to theme.
</issue>
<code>
[start of CTFd/admin/challenges.py]
1 from flask import abort, render_template, request, url_for
2
3 from CTFd.admin import admin
4 from CTFd.models import Challenges, Flags, Solves
5 from CTFd.plugins.challenges import CHALLENGE_CLASSES, get_chal_class
6 from CTFd.utils.decorators import admins_only
7
8
9 @admin.route("/admin/challenges")
10 @admins_only
11 def challenges_listing():
12 q = request.args.get("q")
13 field = request.args.get("field")
14 filters = []
15
16 if q:
17 # The field exists as an exposed column
18 if Challenges.__mapper__.has_property(field):
19 filters.append(getattr(Challenges, field).like("%{}%".format(q)))
20
21 query = Challenges.query.filter(*filters).order_by(Challenges.id.asc())
22 challenges = query.all()
23 total = query.count()
24
25 return render_template(
26 "admin/challenges/challenges.html",
27 challenges=challenges,
28 total=total,
29 q=q,
30 field=field,
31 )
32
33
34 @admin.route("/admin/challenges/<int:challenge_id>")
35 @admins_only
36 def challenges_detail(challenge_id):
37 challenges = dict(
38 Challenges.query.with_entities(Challenges.id, Challenges.name).all()
39 )
40 challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()
41 solves = (
42 Solves.query.filter_by(challenge_id=challenge.id)
43 .order_by(Solves.date.asc())
44 .all()
45 )
46 flags = Flags.query.filter_by(challenge_id=challenge.id).all()
47
48 try:
49 challenge_class = get_chal_class(challenge.type)
50 except KeyError:
51 abort(
52 500,
53 f"The underlying challenge type ({challenge.type}) is not installed. This challenge can not be loaded.",
54 )
55
56 update_j2 = render_template(
57 challenge_class.templates["update"].lstrip("/"), challenge=challenge
58 )
59
60 update_script = url_for(
61 "views.static_html", route=challenge_class.scripts["update"].lstrip("/")
62 )
63 return render_template(
64 "admin/challenges/challenge.html",
65 update_template=update_j2,
66 update_script=update_script,
67 challenge=challenge,
68 challenges=challenges,
69 solves=solves,
70 flags=flags,
71 )
72
73
74 @admin.route("/admin/challenges/new")
75 @admins_only
76 def challenges_new():
77 types = CHALLENGE_CLASSES.keys()
78 return render_template("admin/challenges/new.html", types=types)
79
[end of CTFd/admin/challenges.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/CTFd/admin/challenges.py b/CTFd/admin/challenges.py
--- a/CTFd/admin/challenges.py
+++ b/CTFd/admin/challenges.py
@@ -3,7 +3,10 @@
from CTFd.admin import admin
from CTFd.models import Challenges, Flags, Solves
from CTFd.plugins.challenges import CHALLENGE_CLASSES, get_chal_class
+from CTFd.schemas.tags import TagSchema
from CTFd.utils.decorators import admins_only
+from CTFd.utils.security.signing import serialize
+from CTFd.utils.user import get_current_team, get_current_user
@admin.route("/admin/challenges")
@@ -71,6 +74,43 @@
)
[email protected]("/admin/challenges/preview/<int:challenge_id>")
+@admins_only
+def challenges_preview(challenge_id):
+ challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()
+ chal_class = get_chal_class(challenge.type)
+ user = get_current_user()
+ team = get_current_team()
+
+ files = []
+ for f in challenge.files:
+ token = {
+ "user_id": user.id,
+ "team_id": team.id if team else None,
+ "file_id": f.id,
+ }
+ files.append(url_for("views.files", path=f.location, token=serialize(token)))
+
+ tags = [
+ tag["value"] for tag in TagSchema("user", many=True).dump(challenge.tags).data
+ ]
+
+ content = render_template(
+ chal_class.templates["view"].lstrip("/"),
+ solves=None,
+ solved_by_me=False,
+ files=files,
+ tags=tags,
+ hints=challenge.hints,
+ max_attempts=challenge.max_attempts,
+ attempts=0,
+ challenge=challenge,
+ )
+ return render_template(
+ "admin/challenges/preview.html", content=content, challenge=challenge
+ )
+
+
@admin.route("/admin/challenges/new")
@admins_only
def challenges_new():
|
{"golden_diff": "diff --git a/CTFd/admin/challenges.py b/CTFd/admin/challenges.py\n--- a/CTFd/admin/challenges.py\n+++ b/CTFd/admin/challenges.py\n@@ -3,7 +3,10 @@\n from CTFd.admin import admin\n from CTFd.models import Challenges, Flags, Solves\n from CTFd.plugins.challenges import CHALLENGE_CLASSES, get_chal_class\n+from CTFd.schemas.tags import TagSchema\n from CTFd.utils.decorators import admins_only\n+from CTFd.utils.security.signing import serialize\n+from CTFd.utils.user import get_current_team, get_current_user\n \n \n @admin.route(\"/admin/challenges\")\n@@ -71,6 +74,43 @@\n )\n \n \[email protected](\"/admin/challenges/preview/<int:challenge_id>\")\n+@admins_only\n+def challenges_preview(challenge_id):\n+ challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()\n+ chal_class = get_chal_class(challenge.type)\n+ user = get_current_user()\n+ team = get_current_team()\n+\n+ files = []\n+ for f in challenge.files:\n+ token = {\n+ \"user_id\": user.id,\n+ \"team_id\": team.id if team else None,\n+ \"file_id\": f.id,\n+ }\n+ files.append(url_for(\"views.files\", path=f.location, token=serialize(token)))\n+\n+ tags = [\n+ tag[\"value\"] for tag in TagSchema(\"user\", many=True).dump(challenge.tags).data\n+ ]\n+\n+ content = render_template(\n+ chal_class.templates[\"view\"].lstrip(\"/\"),\n+ solves=None,\n+ solved_by_me=False,\n+ files=files,\n+ tags=tags,\n+ hints=challenge.hints,\n+ max_attempts=challenge.max_attempts,\n+ attempts=0,\n+ challenge=challenge,\n+ )\n+ return render_template(\n+ \"admin/challenges/preview.html\", content=content, challenge=challenge\n+ )\n+\n+\n @admin.route(\"/admin/challenges/new\")\n @admins_only\n def challenges_new():\n", "issue": "Challenge Preview Improvements\nChallenge Preview should probably preview in the context of a full page. Primarily because it's easier to theme this. \n", "before_files": [{"content": "from flask import abort, render_template, request, url_for\n\nfrom CTFd.admin import admin\nfrom CTFd.models import Challenges, Flags, Solves\nfrom CTFd.plugins.challenges import CHALLENGE_CLASSES, get_chal_class\nfrom CTFd.utils.decorators import admins_only\n\n\[email protected](\"/admin/challenges\")\n@admins_only\ndef challenges_listing():\n q = request.args.get(\"q\")\n field = request.args.get(\"field\")\n filters = []\n\n if q:\n # The field exists as an exposed column\n if Challenges.__mapper__.has_property(field):\n filters.append(getattr(Challenges, field).like(\"%{}%\".format(q)))\n\n query = Challenges.query.filter(*filters).order_by(Challenges.id.asc())\n challenges = query.all()\n total = query.count()\n\n return render_template(\n \"admin/challenges/challenges.html\",\n challenges=challenges,\n total=total,\n q=q,\n field=field,\n )\n\n\[email protected](\"/admin/challenges/<int:challenge_id>\")\n@admins_only\ndef challenges_detail(challenge_id):\n challenges = dict(\n Challenges.query.with_entities(Challenges.id, Challenges.name).all()\n )\n challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()\n solves = (\n Solves.query.filter_by(challenge_id=challenge.id)\n .order_by(Solves.date.asc())\n .all()\n )\n flags = Flags.query.filter_by(challenge_id=challenge.id).all()\n\n try:\n challenge_class = get_chal_class(challenge.type)\n except KeyError:\n abort(\n 500,\n f\"The underlying challenge type ({challenge.type}) is not installed. 
This challenge can not be loaded.\",\n )\n\n update_j2 = render_template(\n challenge_class.templates[\"update\"].lstrip(\"/\"), challenge=challenge\n )\n\n update_script = url_for(\n \"views.static_html\", route=challenge_class.scripts[\"update\"].lstrip(\"/\")\n )\n return render_template(\n \"admin/challenges/challenge.html\",\n update_template=update_j2,\n update_script=update_script,\n challenge=challenge,\n challenges=challenges,\n solves=solves,\n flags=flags,\n )\n\n\[email protected](\"/admin/challenges/new\")\n@admins_only\ndef challenges_new():\n types = CHALLENGE_CLASSES.keys()\n return render_template(\"admin/challenges/new.html\", types=types)\n", "path": "CTFd/admin/challenges.py"}]}
| 1,247 | 472 |
gh_patches_debug_28456
|
rasdani/github-patches
|
git_diff
|
apache__airflow-20671
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error setting dependencies on task_group defined using the decorator
### Apache Airflow version
2.2.2 (latest released)
### Operating System
MacOS 11.6.1
### Versions of Apache Airflow Providers
$ pip freeze | grep airflow
apache-airflow==2.2.2
apache-airflow-providers-celery==2.1.0
apache-airflow-providers-ftp==2.0.1
apache-airflow-providers-http==2.0.1
apache-airflow-providers-imap==2.0.1
apache-airflow-providers-sqlite==2.0.1
### Deployment
Other
### Deployment details
`airflow standalone`
### What happened
```
AttributeError: 'NoneType' object has no attribute 'update_relative'
```
### What you expected to happen
The task group should be set downstream of the `start` task and upstream of the `end` task
### How to reproduce
* Add the following code to dags folder
```python
from datetime import datetime
from airflow.decorators import dag, task, task_group
@dag(start_date=datetime(2023, 1, 1), schedule_interval="@once")
def test_dag_1():
@task
def start():
pass
@task
def do_thing(x):
print(x)
@task_group
def do_all_things():
do_thing(1)
do_thing(2)
@task
def end():
pass
start() >> do_all_things() >> end()
test_dag_1()
```
* Run `airflow standalone`
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
</issue>
<code>
[start of airflow/decorators/task_group.py]
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18 """
19 A TaskGroup is a collection of closely related tasks on the same DAG that should be grouped
20 together when the DAG is displayed graphically.
21 """
22 import functools
23 import warnings
24 from inspect import signature
25 from typing import TYPE_CHECKING, Any, Callable, Dict, Generic, Optional, TypeVar, cast, overload
26
27 import attr
28
29 from airflow.utils.task_group import MappedTaskGroup, TaskGroup
30
31 if TYPE_CHECKING:
32 from airflow.models import DAG
33
34 F = TypeVar("F", bound=Callable[..., Any])
35 T = TypeVar("T", bound=Callable)
36 R = TypeVar("R")
37
38 task_group_sig = signature(TaskGroup.__init__)
39
40
41 @attr.define
42 class TaskGroupDecorator(Generic[R]):
43 """:meta private:"""
44
45 function: Callable[..., R] = attr.ib(validator=attr.validators.is_callable())
46 kwargs: Dict[str, Any] = attr.ib(factory=dict)
47 """kwargs for the TaskGroup"""
48
49 @function.validator
50 def _validate_function(self, _, f):
51 if 'self' in signature(f).parameters:
52 raise TypeError('@task_group does not support methods')
53
54 @kwargs.validator
55 def _validate(self, _, kwargs):
56 task_group_sig.bind_partial(**kwargs)
57
58 def __attrs_post_init__(self):
59 self.kwargs.setdefault('group_id', self.function.__name__)
60
61 def _make_task_group(self, **kwargs) -> TaskGroup:
62 return TaskGroup(**kwargs)
63
64 def __call__(self, *args, **kwargs) -> R:
65 with self._make_task_group(add_suffix_on_collision=True, **self.kwargs):
66 # Invoke function to run Tasks inside the TaskGroup
67 return self.function(*args, **kwargs)
68
69 def partial(self, **kwargs) -> "MappedTaskGroupDecorator[R]":
70 return MappedTaskGroupDecorator(function=self.function, kwargs=self.kwargs).partial(**kwargs)
71
72 def map(self, **kwargs) -> R:
73 return MappedTaskGroupDecorator(function=self.function, kwargs=self.kwargs).map(**kwargs)
74
75
76 @attr.define
77 class MappedTaskGroupDecorator(TaskGroupDecorator[R]):
78 """:meta private:"""
79
80 partial_kwargs: Dict[str, Any] = attr.ib(factory=dict)
81 """static kwargs for the decorated function"""
82 mapped_kwargs: Dict[str, Any] = attr.ib(factory=dict)
83 """kwargs for the decorated function"""
84
85 def __call__(self, *args, **kwargs):
86 raise RuntimeError("A mapped @task_group cannot be called. Use `.map` and `.partial` instead")
87
88 def _make_task_group(self, **kwargs) -> MappedTaskGroup:
89 tg = MappedTaskGroup(**kwargs)
90 tg.partial_kwargs = self.partial_kwargs
91 tg.mapped_kwargs = self.mapped_kwargs
92 return tg
93
94 def partial(self, **kwargs) -> "MappedTaskGroupDecorator[R]":
95 if self.partial_kwargs:
96 raise RuntimeError("Already a partial task group")
97 self.partial_kwargs.update(kwargs)
98 return self
99
100 def map(self, **kwargs) -> R:
101 if self.mapped_kwargs:
102 raise RuntimeError("Already a mapped task group")
103 self.mapped_kwargs = kwargs
104
105 call_kwargs = self.partial_kwargs.copy()
106 duplicated_keys = set(call_kwargs).intersection(kwargs)
107 if duplicated_keys:
108 raise RuntimeError(f"Cannot map partial arguments: {', '.join(sorted(duplicated_keys))}")
109 call_kwargs.update({k: object() for k in kwargs})
110
111 return super().__call__(**call_kwargs)
112
113 def __del__(self):
114 if not self.mapped_kwargs:
115 warnings.warn(f"Partial task group {self.function.__name__} was never mapped!")
116
117
118 # This covers the @task_group() case. Annotations are copied from the TaskGroup
119 # class, only providing a default to 'group_id' (this is optional for the
120 # decorator and defaults to the decorated function's name). Please keep them in
121 # sync with TaskGroup when you can! Note that since this is an overload, these
122 # argument defaults aren't actually used at runtime--the real implementation
123 # does not use them, and simply rely on TaskGroup's defaults, so it's not
124 # disastrous if they go out of sync with TaskGroup.
125 @overload
126 def task_group(
127 group_id: Optional[str] = None,
128 prefix_group_id: bool = True,
129 parent_group: Optional["TaskGroup"] = None,
130 dag: Optional["DAG"] = None,
131 default_args: Optional[Dict[str, Any]] = None,
132 tooltip: str = "",
133 ui_color: str = "CornflowerBlue",
134 ui_fgcolor: str = "#000",
135 add_suffix_on_collision: bool = False,
136 ) -> Callable[[F], F]:
137 ...
138
139
140 # This covers the @task_group case (no parentheses).
141 @overload
142 def task_group(python_callable: F) -> F:
143 ...
144
145
146 def task_group(python_callable=None, *tg_args, **tg_kwargs):
147 """
148 Python TaskGroup decorator.
149
150 This wraps a function into an Airflow TaskGroup. When used as the
151 ``@task_group()`` form, all arguments are forwarded to the underlying
152 TaskGroup class. Can be used to parametrize TaskGroup.
153
154 :param python_callable: Function to decorate.
155 :param tg_args: Positional arguments for the TaskGroup object.
156 :param tg_kwargs: Keyword arguments for the TaskGroup object.
157 """
158 if callable(python_callable):
159 return TaskGroupDecorator(function=python_callable, kwargs=tg_kwargs)
160 return cast("Callable[[T], T]", functools.partial(TaskGroupDecorator, kwargs=tg_kwargs))
161
[end of airflow/decorators/task_group.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/airflow/decorators/task_group.py b/airflow/decorators/task_group.py
--- a/airflow/decorators/task_group.py
+++ b/airflow/decorators/task_group.py
@@ -42,7 +42,7 @@
class TaskGroupDecorator(Generic[R]):
""":meta private:"""
- function: Callable[..., R] = attr.ib(validator=attr.validators.is_callable())
+ function: Callable[..., Optional[R]] = attr.ib(validator=attr.validators.is_callable())
kwargs: Dict[str, Any] = attr.ib(factory=dict)
"""kwargs for the TaskGroup"""
@@ -62,9 +62,24 @@
return TaskGroup(**kwargs)
def __call__(self, *args, **kwargs) -> R:
- with self._make_task_group(add_suffix_on_collision=True, **self.kwargs):
+ with self._make_task_group(add_suffix_on_collision=True, **self.kwargs) as task_group:
# Invoke function to run Tasks inside the TaskGroup
- return self.function(*args, **kwargs)
+ retval = self.function(*args, **kwargs)
+
+ # If the task-creating function returns a task, forward the return value
+ # so dependencies bind to it. This is equivalent to
+ # with TaskGroup(...) as tg:
+ # t2 = task_2(task_1())
+ # start >> t2 >> end
+ if retval is not None:
+ return retval
+
+ # Otherwise return the task group as a whole, equivalent to
+ # with TaskGroup(...) as tg:
+ # task_1()
+ # task_2()
+ # start >> tg >> end
+ return task_group
def partial(self, **kwargs) -> "MappedTaskGroupDecorator[R]":
return MappedTaskGroupDecorator(function=self.function, kwargs=self.kwargs).partial(**kwargs)
|
{"golden_diff": "diff --git a/airflow/decorators/task_group.py b/airflow/decorators/task_group.py\n--- a/airflow/decorators/task_group.py\n+++ b/airflow/decorators/task_group.py\n@@ -42,7 +42,7 @@\n class TaskGroupDecorator(Generic[R]):\n \"\"\":meta private:\"\"\"\n \n- function: Callable[..., R] = attr.ib(validator=attr.validators.is_callable())\n+ function: Callable[..., Optional[R]] = attr.ib(validator=attr.validators.is_callable())\n kwargs: Dict[str, Any] = attr.ib(factory=dict)\n \"\"\"kwargs for the TaskGroup\"\"\"\n \n@@ -62,9 +62,24 @@\n return TaskGroup(**kwargs)\n \n def __call__(self, *args, **kwargs) -> R:\n- with self._make_task_group(add_suffix_on_collision=True, **self.kwargs):\n+ with self._make_task_group(add_suffix_on_collision=True, **self.kwargs) as task_group:\n # Invoke function to run Tasks inside the TaskGroup\n- return self.function(*args, **kwargs)\n+ retval = self.function(*args, **kwargs)\n+\n+ # If the task-creating function returns a task, forward the return value\n+ # so dependencies bind to it. This is equivalent to\n+ # with TaskGroup(...) as tg:\n+ # t2 = task_2(task_1())\n+ # start >> t2 >> end\n+ if retval is not None:\n+ return retval\n+\n+ # Otherwise return the task group as a whole, equivalent to\n+ # with TaskGroup(...) as tg:\n+ # task_1()\n+ # task_2()\n+ # start >> tg >> end\n+ return task_group\n \n def partial(self, **kwargs) -> \"MappedTaskGroupDecorator[R]\":\n return MappedTaskGroupDecorator(function=self.function, kwargs=self.kwargs).partial(**kwargs)\n", "issue": "Error setting dependencies on task_group defined using the decorator\n### Apache Airflow version\n\n2.2.2 (latest released)\n\n### Operating System\n\nMacOS 11.6.1\n\n### Versions of Apache Airflow Providers\n\n$ pip freeze | grep airflow\r\napache-airflow==2.2.2\r\napache-airflow-providers-celery==2.1.0\r\napache-airflow-providers-ftp==2.0.1\r\napache-airflow-providers-http==2.0.1\r\napache-airflow-providers-imap==2.0.1\r\napache-airflow-providers-sqlite==2.0.1\n\n### Deployment\n\nOther\n\n### Deployment details\n\n`airflow standalone`\n\n### What happened\n\n```\r\nAttributeError: 'NoneType' object has no attribute 'update_relative'\r\n```\n\n### What you expected to happen\n\nTask group should be set as downstream of `start` task, and upstream of `end` task\n\n### How to reproduce\n\n* Add the following code to dags folder\r\n\r\n```python\r\nfrom datetime import datetime\r\n\r\nfrom airflow.decorators import dag, task, task_group\r\n\r\n\r\n@dag(start_date=datetime(2023, 1, 1), schedule_interval=\"@once\")\r\ndef test_dag_1():\r\n @task\r\n def start():\r\n pass\r\n\r\n @task\r\n def do_thing(x):\r\n print(x)\r\n\r\n @task_group\r\n def do_all_things():\r\n do_thing(1)\r\n do_thing(2)\r\n\r\n @task\r\n def end():\r\n pass\r\n\r\n start() >> do_all_things() >> end()\r\n\r\n\r\ntest_dag_1()\r\n```\r\n\r\n* Run `airflow standalone`\n\n### Anything else\n\n_No response_\n\n### Are you willing to submit PR?\n\n- [ ] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. 
The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"\nA TaskGroup is a collection of closely related tasks on the same DAG that should be grouped\ntogether when the DAG is displayed graphically.\n\"\"\"\nimport functools\nimport warnings\nfrom inspect import signature\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, Generic, Optional, TypeVar, cast, overload\n\nimport attr\n\nfrom airflow.utils.task_group import MappedTaskGroup, TaskGroup\n\nif TYPE_CHECKING:\n from airflow.models import DAG\n\nF = TypeVar(\"F\", bound=Callable[..., Any])\nT = TypeVar(\"T\", bound=Callable)\nR = TypeVar(\"R\")\n\ntask_group_sig = signature(TaskGroup.__init__)\n\n\[email protected]\nclass TaskGroupDecorator(Generic[R]):\n \"\"\":meta private:\"\"\"\n\n function: Callable[..., R] = attr.ib(validator=attr.validators.is_callable())\n kwargs: Dict[str, Any] = attr.ib(factory=dict)\n \"\"\"kwargs for the TaskGroup\"\"\"\n\n @function.validator\n def _validate_function(self, _, f):\n if 'self' in signature(f).parameters:\n raise TypeError('@task_group does not support methods')\n\n @kwargs.validator\n def _validate(self, _, kwargs):\n task_group_sig.bind_partial(**kwargs)\n\n def __attrs_post_init__(self):\n self.kwargs.setdefault('group_id', self.function.__name__)\n\n def _make_task_group(self, **kwargs) -> TaskGroup:\n return TaskGroup(**kwargs)\n\n def __call__(self, *args, **kwargs) -> R:\n with self._make_task_group(add_suffix_on_collision=True, **self.kwargs):\n # Invoke function to run Tasks inside the TaskGroup\n return self.function(*args, **kwargs)\n\n def partial(self, **kwargs) -> \"MappedTaskGroupDecorator[R]\":\n return MappedTaskGroupDecorator(function=self.function, kwargs=self.kwargs).partial(**kwargs)\n\n def map(self, **kwargs) -> R:\n return MappedTaskGroupDecorator(function=self.function, kwargs=self.kwargs).map(**kwargs)\n\n\[email protected]\nclass MappedTaskGroupDecorator(TaskGroupDecorator[R]):\n \"\"\":meta private:\"\"\"\n\n partial_kwargs: Dict[str, Any] = attr.ib(factory=dict)\n \"\"\"static kwargs for the decorated function\"\"\"\n mapped_kwargs: Dict[str, Any] = attr.ib(factory=dict)\n \"\"\"kwargs for the decorated function\"\"\"\n\n def __call__(self, *args, **kwargs):\n raise RuntimeError(\"A mapped @task_group cannot be called. 
Use `.map` and `.partial` instead\")\n\n def _make_task_group(self, **kwargs) -> MappedTaskGroup:\n tg = MappedTaskGroup(**kwargs)\n tg.partial_kwargs = self.partial_kwargs\n tg.mapped_kwargs = self.mapped_kwargs\n return tg\n\n def partial(self, **kwargs) -> \"MappedTaskGroupDecorator[R]\":\n if self.partial_kwargs:\n raise RuntimeError(\"Already a partial task group\")\n self.partial_kwargs.update(kwargs)\n return self\n\n def map(self, **kwargs) -> R:\n if self.mapped_kwargs:\n raise RuntimeError(\"Already a mapped task group\")\n self.mapped_kwargs = kwargs\n\n call_kwargs = self.partial_kwargs.copy()\n duplicated_keys = set(call_kwargs).intersection(kwargs)\n if duplicated_keys:\n raise RuntimeError(f\"Cannot map partial arguments: {', '.join(sorted(duplicated_keys))}\")\n call_kwargs.update({k: object() for k in kwargs})\n\n return super().__call__(**call_kwargs)\n\n def __del__(self):\n if not self.mapped_kwargs:\n warnings.warn(f\"Partial task group {self.function.__name__} was never mapped!\")\n\n\n# This covers the @task_group() case. Annotations are copied from the TaskGroup\n# class, only providing a default to 'group_id' (this is optional for the\n# decorator and defaults to the decorated function's name). Please keep them in\n# sync with TaskGroup when you can! Note that since this is an overload, these\n# argument defaults aren't actually used at runtime--the real implementation\n# does not use them, and simply rely on TaskGroup's defaults, so it's not\n# disastrous if they go out of sync with TaskGroup.\n@overload\ndef task_group(\n group_id: Optional[str] = None,\n prefix_group_id: bool = True,\n parent_group: Optional[\"TaskGroup\"] = None,\n dag: Optional[\"DAG\"] = None,\n default_args: Optional[Dict[str, Any]] = None,\n tooltip: str = \"\",\n ui_color: str = \"CornflowerBlue\",\n ui_fgcolor: str = \"#000\",\n add_suffix_on_collision: bool = False,\n) -> Callable[[F], F]:\n ...\n\n\n# This covers the @task_group case (no parentheses).\n@overload\ndef task_group(python_callable: F) -> F:\n ...\n\n\ndef task_group(python_callable=None, *tg_args, **tg_kwargs):\n \"\"\"\n Python TaskGroup decorator.\n\n This wraps a function into an Airflow TaskGroup. When used as the\n ``@task_group()`` form, all arguments are forwarded to the underlying\n TaskGroup class. Can be used to parametrize TaskGroup.\n\n :param python_callable: Function to decorate.\n :param tg_args: Positional arguments for the TaskGroup object.\n :param tg_kwargs: Keyword arguments for the TaskGroup object.\n \"\"\"\n if callable(python_callable):\n return TaskGroupDecorator(function=python_callable, kwargs=tg_kwargs)\n return cast(\"Callable[[T], T]\", functools.partial(TaskGroupDecorator, kwargs=tg_kwargs))\n", "path": "airflow/decorators/task_group.py"}]}
| 2,716 | 427 |
gh_patches_debug_19819
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-3327
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Document about initializers
Criteria for initializer arguments of links is complicated (https://github.com/chainer/chainer/pull/3259#issuecomment-325562538). We should write some dedicated documentation about that, and let each link documentation point to it. Maybe we can write on [this page](https://docs.chainer.org/en/v2.0.2/reference/initializers.html).
Also we should describe *the default initializer* (which is `LeCunNormal`).
</issue>
<code>
[start of chainer/links/connection/linear.py]
1 from chainer.functions.connection import linear
2 from chainer import initializers
3 from chainer import link
4 from chainer import variable
5
6
7 class Linear(link.Link):
8
9 """Linear layer (a.k.a.\\ fully-connected layer).
10
11 This is a link that wraps the :func:`~chainer.functions.linear` function,
12 and holds a weight matrix ``W`` and optionally a bias vector ``b`` as
13 parameters.
14
15 The weight matrix ``W`` is initialized with i.i.d. Gaussian samples, each
16 of which has zero mean and deviation :math:`\\sqrt{1/\\text{in_size}}`. The
17 bias vector ``b`` is of size ``out_size``. Each element is initialized with
18 the ``bias`` value. If ``nobias`` argument is set to ``True``, then this
19 link does not hold a bias vector.
20
21 Args:
22 in_size (int or None): Dimension of input vectors. If ``None``,
23 parameter initialization will be deferred until the first forward
24 data pass at which time the size will be determined.
25 out_size (int): Dimension of output vectors.
26 nobias (bool): If ``True``, then this function does not use the bias.
27 initialW (2-D array): Initial weight value. If ``None``, then the
28 default initializer is used.
29 May also be a callable that takes ``numpy.ndarray`` or
30 ``cupy.ndarray`` and edits its value.
31 initial_bias (1-D array): Initial bias value. If ``None``, the bias
32 vector is initialized to zero.
33 May also be a callable that takes ``numpy.ndarray`` or
34 ``cupy.ndarray`` and edits its value.
35 .. seealso:: :func:`~chainer.functions.linear`
36
37 Attributes:
38 W (~chainer.Variable): Weight parameter.
39 b (~chainer.Variable): Bias parameter.
40
41 .. admonition:: Example
42
43 There are several ways to make a Linear link.
44
45 Define an input vector ``x`` as:
46
47 >>> x = np.array([[0, 1, 2, 3, 4]], 'f')
48
49 1. Give the first two arguments explicitly:
50
51 Those numbers are considered as the input size and the output size.
52
53 >>> l = L.Linear(5, 10)
54 >>> y = l(x)
55 >>> y.shape
56 (1, 10)
57
58 2. Omit ``in_size`` (give the output size only as the first argument)
59 or fill it with ``None``:
60
61 In this case, the size of second axis of ``x`` is used as the
62 input size. So the below two cases are the same.
63
64 >>> l = L.Linear(10)
65 >>> y = l(x)
66 >>> y.shape
67 (1, 10)
68
69 >>> l = L.Linear(None, 10)
70 >>> y = l(x)
71 >>> y.shape
72 (1, 10)
73
74 When you omit the first argument, you need to specify the other
75 subsequent arguments from ``nobias`` as keyword arguments. So the
76 below two cases are the same.
77
78 >>> l = L.Linear(None, 10, False, None, 0)
79 >>> y = l(x)
80 >>> y.shape
81 (1, 10)
82
83 >>> l = L.Linear(10, nobias=False, initialW=None, initial_bias=0)
84 >>> y = l(x)
85 >>> y.shape
86 (1, 10)
87
88 """
89
90 def __init__(self, in_size, out_size=None, nobias=False,
91 initialW=None, initial_bias=None):
92 super(Linear, self).__init__()
93
94 if out_size is None:
95 in_size, out_size = None, in_size
96 self.out_size = out_size
97
98 with self.init_scope():
99 W_initializer = initializers._get_initializer(initialW)
100 self.W = variable.Parameter(W_initializer)
101 if in_size is not None:
102 self._initialize_params(in_size)
103
104 if nobias:
105 self.b = None
106 else:
107 if initial_bias is None:
108 initial_bias = 0
109 bias_initializer = initializers._get_initializer(initial_bias)
110 self.b = variable.Parameter(bias_initializer, out_size)
111
112 def _initialize_params(self, in_size):
113 self.W.initialize((self.out_size, in_size))
114
115 def __call__(self, x):
116 """Applies the linear layer.
117
118 Args:
119 x (~chainer.Variable): Batch of input vectors.
120
121 Returns:
122 ~chainer.Variable: Output of the linear layer.
123
124 """
125 if self.W.data is None:
126 self._initialize_params(x.size // x.shape[0])
127 return linear.linear(x, self.W, self.b)
128
[end of chainer/links/connection/linear.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/chainer/links/connection/linear.py b/chainer/links/connection/linear.py
--- a/chainer/links/connection/linear.py
+++ b/chainer/links/connection/linear.py
@@ -24,14 +24,11 @@
data pass at which time the size will be determined.
out_size (int): Dimension of output vectors.
nobias (bool): If ``True``, then this function does not use the bias.
- initialW (2-D array): Initial weight value. If ``None``, then the
- default initializer is used.
- May also be a callable that takes ``numpy.ndarray`` or
- ``cupy.ndarray`` and edits its value.
- initial_bias (1-D array): Initial bias value. If ``None``, the bias
- vector is initialized to zero.
- May also be a callable that takes ``numpy.ndarray`` or
- ``cupy.ndarray`` and edits its value.
+ initialW (:ref:`initializer <initializer>`): Initializer to initialize
+ the weight.
+ initial_bias (:ref:`initializer <initializer>`): Initializer to
+ initialize the bias. If ``None``, the bias will be initialized to
+ zero.
.. seealso:: :func:`~chainer.functions.linear`
Attributes:
|
{"golden_diff": "diff --git a/chainer/links/connection/linear.py b/chainer/links/connection/linear.py\n--- a/chainer/links/connection/linear.py\n+++ b/chainer/links/connection/linear.py\n@@ -24,14 +24,11 @@\n data pass at which time the size will be determined.\n out_size (int): Dimension of output vectors.\n nobias (bool): If ``True``, then this function does not use the bias.\n- initialW (2-D array): Initial weight value. If ``None``, then the\n- default initializer is used.\n- May also be a callable that takes ``numpy.ndarray`` or\n- ``cupy.ndarray`` and edits its value.\n- initial_bias (1-D array): Initial bias value. If ``None``, the bias\n- vector is initialized to zero.\n- May also be a callable that takes ``numpy.ndarray`` or\n- ``cupy.ndarray`` and edits its value.\n+ initialW (:ref:`initializer <initializer>`): Initializer to initialize\n+ the weight.\n+ initial_bias (:ref:`initializer <initializer>`): Initializer to\n+ initialize the bias. If ``None``, the bias will be initialized to\n+ zero.\n .. seealso:: :func:`~chainer.functions.linear`\n \n Attributes:\n", "issue": "Document about initializers\nCriteria for initializer arguments of links is complicated (https://github.com/chainer/chainer/pull/3259#issuecomment-325562538). We should write some dedicated documentation about that, and let each link documentation point to it. Maybe we can write on [this page](https://docs.chainer.org/en/v2.0.2/reference/initializers.html).\r\n\r\nAlso we should describe *the default initializer* (which is `LeCunNormal`).\n", "before_files": [{"content": "from chainer.functions.connection import linear\nfrom chainer import initializers\nfrom chainer import link\nfrom chainer import variable\n\n\nclass Linear(link.Link):\n\n \"\"\"Linear layer (a.k.a.\\\\ fully-connected layer).\n\n This is a link that wraps the :func:`~chainer.functions.linear` function,\n and holds a weight matrix ``W`` and optionally a bias vector ``b`` as\n parameters.\n\n The weight matrix ``W`` is initialized with i.i.d. Gaussian samples, each\n of which has zero mean and deviation :math:`\\\\sqrt{1/\\\\text{in_size}}`. The\n bias vector ``b`` is of size ``out_size``. Each element is initialized with\n the ``bias`` value. If ``nobias`` argument is set to ``True``, then this\n link does not hold a bias vector.\n\n Args:\n in_size (int or None): Dimension of input vectors. If ``None``,\n parameter initialization will be deferred until the first forward\n data pass at which time the size will be determined.\n out_size (int): Dimension of output vectors.\n nobias (bool): If ``True``, then this function does not use the bias.\n initialW (2-D array): Initial weight value. If ``None``, then the\n default initializer is used.\n May also be a callable that takes ``numpy.ndarray`` or\n ``cupy.ndarray`` and edits its value.\n initial_bias (1-D array): Initial bias value. If ``None``, the bias\n vector is initialized to zero.\n May also be a callable that takes ``numpy.ndarray`` or\n ``cupy.ndarray`` and edits its value.\n .. seealso:: :func:`~chainer.functions.linear`\n\n Attributes:\n W (~chainer.Variable): Weight parameter.\n b (~chainer.Variable): Bias parameter.\n\n .. admonition:: Example\n\n There are several ways to make a Linear link.\n\n Define an input vector ``x`` as:\n\n >>> x = np.array([[0, 1, 2, 3, 4]], 'f')\n\n 1. Give the first two arguments explicitly:\n\n Those numbers are considered as the input size and the output size.\n\n >>> l = L.Linear(5, 10)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n 2. 
Omit ``in_size`` (give the output size only as the first argument)\n or fill it with ``None``:\n\n In this case, the size of second axis of ``x`` is used as the\n input size. So the below two cases are the same.\n\n >>> l = L.Linear(10)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n >>> l = L.Linear(None, 10)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n When you omit the first argument, you need to specify the other\n subsequent arguments from ``nobias`` as keyword arguments. So the\n below two cases are the same.\n\n >>> l = L.Linear(None, 10, False, None, 0)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n >>> l = L.Linear(10, nobias=False, initialW=None, initial_bias=0)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n \"\"\"\n\n def __init__(self, in_size, out_size=None, nobias=False,\n initialW=None, initial_bias=None):\n super(Linear, self).__init__()\n\n if out_size is None:\n in_size, out_size = None, in_size\n self.out_size = out_size\n\n with self.init_scope():\n W_initializer = initializers._get_initializer(initialW)\n self.W = variable.Parameter(W_initializer)\n if in_size is not None:\n self._initialize_params(in_size)\n\n if nobias:\n self.b = None\n else:\n if initial_bias is None:\n initial_bias = 0\n bias_initializer = initializers._get_initializer(initial_bias)\n self.b = variable.Parameter(bias_initializer, out_size)\n\n def _initialize_params(self, in_size):\n self.W.initialize((self.out_size, in_size))\n\n def __call__(self, x):\n \"\"\"Applies the linear layer.\n\n Args:\n x (~chainer.Variable): Batch of input vectors.\n\n Returns:\n ~chainer.Variable: Output of the linear layer.\n\n \"\"\"\n if self.W.data is None:\n self._initialize_params(x.size // x.shape[0])\n return linear.linear(x, self.W, self.b)\n", "path": "chainer/links/connection/linear.py"}]}
| 1,969 | 285 |
gh_patches_debug_32218
|
rasdani/github-patches
|
git_diff
|
conda__conda-5814
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
_pip_install_via_requirements got an unexpected keyword argument 'prune'
```
{
"command":"/home/travis/miniconda/bin/conda-env update",
"conda_info":{
"GID":2000,
"UID":2000,
"_channels":"https://conda.anaconda.org/conda-canary/linux-64
https://conda.anaconda.org/conda-canary/noarch
https://repo.continuum.io/pkgs/free/linux-64
https://repo.continuum.io/pkgs/free/noarch
https://repo.continuum.io/pkgs/r/linux-64
https://repo.continuum.io/pkgs/r/noarch
https://repo.continuum.io/pkgs/pro/linux-64
https://repo.continuum.io/pkgs/pro/noarch",
"_config_files":"/home/travis/.condarc",
"_envs_dirs":"/home/travis/miniconda/envs
/home/travis/.conda/envs",
"_pkgs_dirs":"/home/travis/miniconda/pkgs
/home/travis/.conda/pkgs",
"_rtwro":"writable",
"active_prefix":null,
"active_prefix_name":null,
"channels":[
"https://conda.anaconda.org/conda-canary/linux-64",
"https://conda.anaconda.org/conda-canary/noarch",
"https://repo.continuum.io/pkgs/free/linux-64",
"https://repo.continuum.io/pkgs/free/noarch",
"https://repo.continuum.io/pkgs/r/linux-64",
"https://repo.continuum.io/pkgs/r/noarch",
"https://repo.continuum.io/pkgs/pro/linux-64",
"https://repo.continuum.io/pkgs/pro/noarch"
],
"conda_build_version":"not installed",
"conda_env_version":"4.4.0rc0",
"conda_location":"/home/travis/miniconda/lib/python3.6/site-packages/conda",
"conda_prefix":"/home/travis/miniconda",
"conda_private":false,
"conda_shlvl":-1,
"conda_version":"4.4.0rc0",
"config_files":[
"/home/travis/.condarc"
],
"default_prefix":"/home/travis/miniconda",
"envs":[
],
"envs_dirs":[
"/home/travis/miniconda/envs",
"/home/travis/.conda/envs"
],
"netrc_file":null,
"offline":false,
"pkgs_dirs":[
"/home/travis/miniconda/pkgs",
"/home/travis/.conda/pkgs"
],
"platform":"linux-64",
"python_version":"3.6.1.final.0",
"rc_path":"/home/travis/.condarc",
"requests_version":"2.14.2",
"root_prefix":"/home/travis/miniconda",
"root_writable":true,
"sys_rc_path":"/home/travis/miniconda/.condarc",
"user_agent":"conda/4.4.0rc0 requests/2.14.2 CPython/3.6.1 Linux/4.4.0-83-generic ubuntu/14.04 glibc/2.19",
"user_rc_path":"/home/travis/.condarc"
},
"error":"TypeError(\"_pip_install_via_requirements() got an unexpected keyword argument 'prune'\",)",
"exception_name":"TypeError",
"exception_type":"<class 'TypeError'>",
"traceback":"Traceback (most recent call last):
File \"/home/travis/miniconda/lib/python3.6/site-packages/conda/exceptions.py\", line 653, in __call__
return func(*args, **kwargs)
File \"/home/travis/miniconda/lib/python3.6/site-packages/conda_env/cli/main_update.py\", line 107, in execute
installer.install(prefix, specs, args, env, prune=args.prune)
TypeError: _pip_install_via_requirements() got an unexpected keyword argument 'prune'
"
}
```
</issue>
<code>
[start of conda_env/cli/main_update.py]
1 from argparse import RawDescriptionHelpFormatter
2 import os
3 import sys
4 import textwrap
5
6 from conda._vendor.auxlib.path import expand
7 from conda.cli import install as cli_install
8 from conda.cli.conda_argparse import add_parser_json, add_parser_prefix
9 from conda.misc import touch_nonadmin
10 from .common import get_prefix
11 from .. import exceptions, specs as install_specs
12 from ..exceptions import CondaEnvException
13 from ..installers.base import InvalidInstaller, get_installer
14
15 description = """
16 Update the current environment based on environment file
17 """
18
19 example = """
20 examples:
21 conda env update
22 conda env update -n=foo
23 conda env update -f=/path/to/environment.yml
24 conda env update --name=foo --file=environment.yml
25 conda env update vader/deathstar
26 """
27
28
29 def configure_parser(sub_parsers):
30 p = sub_parsers.add_parser(
31 'update',
32 formatter_class=RawDescriptionHelpFormatter,
33 description=description,
34 help=description,
35 epilog=example,
36 )
37 add_parser_prefix(p)
38 p.add_argument(
39 '-f', '--file',
40 action='store',
41 help='environment definition (default: environment.yml)',
42 default='environment.yml',
43 )
44 p.add_argument(
45 '--prune',
46 action='store_true',
47 default=False,
48 help='remove installed packages not defined in environment.yml',
49 )
50 p.add_argument(
51 '-q', '--quiet',
52 action='store_true',
53 default=False,
54 )
55 p.add_argument(
56 'remote_definition',
57 help='remote environment definition / IPython notebook',
58 action='store',
59 default=None,
60 nargs='?'
61 )
62 add_parser_json(p)
63 p.set_defaults(func=execute)
64
65
66 def execute(args, parser):
67 name = args.remote_definition or args.name
68
69 try:
70 spec = install_specs.detect(name=name, filename=expand(args.file),
71 directory=os.getcwd())
72 env = spec.environment
73 except exceptions.SpecNotFound:
74 raise
75
76 if not (args.name or args.prefix):
77 if not env.name:
78 # Note, this is a hack fofr get_prefix that assumes argparse results
79 # TODO Refactor common.get_prefix
80 name = os.environ.get('CONDA_DEFAULT_ENV', False)
81 if not name:
82 msg = "Unable to determine environment\n\n"
83 msg += textwrap.dedent("""
84 Please re-run this command with one of the following options:
85
86 * Provide an environment name via --name or -n
87 * Re-run this command inside an activated conda environment.""").lstrip()
88 # TODO Add json support
89 raise CondaEnvException(msg)
90
91 # Note: stubbing out the args object as all of the
92 # conda.cli.common code thinks that name will always
93 # be specified.
94 args.name = env.name
95
96 prefix = get_prefix(args, search=False)
97 # CAN'T Check with this function since it assumes we will create prefix.
98 # cli_install.check_prefix(prefix, json=args.json)
99
100 # TODO, add capability
101 # common.ensure_override_channels_requires_channel(args)
102 # channel_urls = args.channel or ()
103
104 for installer_type, specs in env.dependencies.items():
105 try:
106 installer = get_installer(installer_type)
107 installer.install(prefix, specs, args, env, prune=args.prune)
108 except InvalidInstaller:
109 sys.stderr.write(textwrap.dedent("""
110 Unable to install package for {0}.
111
112 Please double check and ensure you dependencies file has
113 the correct spelling. You might also try installing the
114 conda-env-{0} package to see if provides the required
115 installer.
116 """).lstrip().format(installer_type)
117 )
118 return -1
119
120 touch_nonadmin(prefix)
121 cli_install.print_activate(args.name if args.name else prefix)
122
[end of conda_env/cli/main_update.py]
[start of conda_env/installers/conda.py]
1 from __future__ import absolute_import
2
3 from os.path import basename
4
5 from conda._vendor.boltons.setutils import IndexedSet
6 from conda.base.context import context
7 from conda.core.solve import Solver
8 from conda.models.channel import Channel, prioritize_channels
9
10
11 def install(prefix, specs, args, env, prune=False):
12 # TODO: support all various ways this happens
13 # Including 'nodefaults' in the channels list disables the defaults
14 new_specs = []
15 channel_urls = set()
16 for elem in specs:
17 if "::" in elem:
18 channel_urls.add(elem.split("::")[0])
19 new_specs.append(elem.split("::")[-1])
20 else:
21 new_specs.append(elem)
22 specs = new_specs
23 channel_urls = list(channel_urls)
24 # TODO: support all various ways this happens
25 # Including 'nodefaults' in the channels list disables the defaults
26 channel_urls = channel_urls + [chan for chan in env.channels if chan != 'nodefaults']
27 if 'nodefaults' not in env.channels:
28 channel_urls.extend(context.channels)
29 _channel_priority_map = prioritize_channels(channel_urls)
30
31 channel_names = IndexedSet(Channel(url).canonical_name for url in _channel_priority_map)
32 channels = IndexedSet(Channel(cn) for cn in channel_names)
33 subdirs = IndexedSet(basename(url) for url in _channel_priority_map)
34
35 solver = Solver(prefix, channels, subdirs, specs_to_add=specs)
36 unlink_link_transaction = solver.solve_for_transaction(prune=prune)
37
38 pfe = unlink_link_transaction.get_pfe()
39 pfe.execute()
40 unlink_link_transaction.execute()
41
[end of conda_env/installers/conda.py]
[start of conda_env/installers/pip.py]
1 from __future__ import absolute_import
2
3 import os
4 import os.path as op
5 import subprocess
6 import tempfile
7 from conda_env.pip_util import pip_args
8 from conda.exceptions import CondaValueError
9
10
11 def _pip_install_via_requirements(prefix, specs, args, *_):
12 """
13 Installs the pip dependencies in specs using a temporary pip requirements file.
14
15 Args
16 ----
17 prefix: string
18 The path to the python and pip executables.
19
20 specs: iterable of strings
21 Each element should be a valid pip dependency.
22 See: https://pip.pypa.io/en/stable/user_guide/#requirements-files
23 https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format
24 """
25 try:
26 pip_workdir = op.dirname(op.abspath(args.file))
27 except AttributeError:
28 pip_workdir = None
29 requirements = None
30 try:
31 # Generate the temporary requirements file
32 requirements = tempfile.NamedTemporaryFile(mode='w',
33 prefix='condaenv.',
34 suffix='.requirements.txt',
35 dir=pip_workdir,
36 delete=False)
37 requirements.write('\n'.join(specs))
38 requirements.close()
39 # pip command line...
40 pip_cmd = pip_args(prefix) + ['install', '-r', requirements.name]
41 # ...run it
42 process = subprocess.Popen(pip_cmd,
43 cwd=pip_workdir,
44 universal_newlines=True)
45 process.communicate()
46 if process.returncode != 0:
47 raise CondaValueError("pip returned an error")
48 finally:
49 # Win/Appveyor does not like it if we use context manager + delete=True.
50 # So we delete the temporary file in a finally block.
51 if requirements is not None and op.isfile(requirements.name):
52 os.remove(requirements.name)
53
54
55 # Conform to Installers API
56 install = _pip_install_via_requirements
57
[end of conda_env/installers/pip.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conda_env/cli/main_update.py b/conda_env/cli/main_update.py
--- a/conda_env/cli/main_update.py
+++ b/conda_env/cli/main_update.py
@@ -104,7 +104,7 @@
for installer_type, specs in env.dependencies.items():
try:
installer = get_installer(installer_type)
- installer.install(prefix, specs, args, env, prune=args.prune)
+ installer.install(prefix, specs, args, env)
except InvalidInstaller:
sys.stderr.write(textwrap.dedent("""
Unable to install package for {0}.
diff --git a/conda_env/installers/conda.py b/conda_env/installers/conda.py
--- a/conda_env/installers/conda.py
+++ b/conda_env/installers/conda.py
@@ -8,7 +8,7 @@
from conda.models.channel import Channel, prioritize_channels
-def install(prefix, specs, args, env, prune=False):
+def install(prefix, specs, args, env, *_, **kwargs):
# TODO: support all various ways this happens
# Including 'nodefaults' in the channels list disables the defaults
new_specs = []
@@ -33,7 +33,7 @@
subdirs = IndexedSet(basename(url) for url in _channel_priority_map)
solver = Solver(prefix, channels, subdirs, specs_to_add=specs)
- unlink_link_transaction = solver.solve_for_transaction(prune=prune)
+ unlink_link_transaction = solver.solve_for_transaction(prune=getattr(args, 'prune', False))
pfe = unlink_link_transaction.get_pfe()
pfe.execute()
diff --git a/conda_env/installers/pip.py b/conda_env/installers/pip.py
--- a/conda_env/installers/pip.py
+++ b/conda_env/installers/pip.py
@@ -8,7 +8,7 @@
from conda.exceptions import CondaValueError
-def _pip_install_via_requirements(prefix, specs, args, *_):
+def _pip_install_via_requirements(prefix, specs, args, *_, **kwargs):
"""
Installs the pip dependencies in specs using a temporary pip requirements file.
|
{"golden_diff": "diff --git a/conda_env/cli/main_update.py b/conda_env/cli/main_update.py\n--- a/conda_env/cli/main_update.py\n+++ b/conda_env/cli/main_update.py\n@@ -104,7 +104,7 @@\n for installer_type, specs in env.dependencies.items():\n try:\n installer = get_installer(installer_type)\n- installer.install(prefix, specs, args, env, prune=args.prune)\n+ installer.install(prefix, specs, args, env)\n except InvalidInstaller:\n sys.stderr.write(textwrap.dedent(\"\"\"\n Unable to install package for {0}.\ndiff --git a/conda_env/installers/conda.py b/conda_env/installers/conda.py\n--- a/conda_env/installers/conda.py\n+++ b/conda_env/installers/conda.py\n@@ -8,7 +8,7 @@\n from conda.models.channel import Channel, prioritize_channels\n \n \n-def install(prefix, specs, args, env, prune=False):\n+def install(prefix, specs, args, env, *_, **kwargs):\n # TODO: support all various ways this happens\n # Including 'nodefaults' in the channels list disables the defaults\n new_specs = []\n@@ -33,7 +33,7 @@\n subdirs = IndexedSet(basename(url) for url in _channel_priority_map)\n \n solver = Solver(prefix, channels, subdirs, specs_to_add=specs)\n- unlink_link_transaction = solver.solve_for_transaction(prune=prune)\n+ unlink_link_transaction = solver.solve_for_transaction(prune=getattr(args, 'prune', False))\n \n pfe = unlink_link_transaction.get_pfe()\n pfe.execute()\ndiff --git a/conda_env/installers/pip.py b/conda_env/installers/pip.py\n--- a/conda_env/installers/pip.py\n+++ b/conda_env/installers/pip.py\n@@ -8,7 +8,7 @@\n from conda.exceptions import CondaValueError\n \n \n-def _pip_install_via_requirements(prefix, specs, args, *_):\n+def _pip_install_via_requirements(prefix, specs, args, *_, **kwargs):\n \"\"\"\n Installs the pip dependencies in specs using a temporary pip requirements file.\n", "issue": "_pip_install_via_requirements got an unexpected keyword argument 'prune'\n```\r\n{\r\n \"command\":\"/home/travis/miniconda/bin/conda-env update\",\r\n \"conda_info\":{\r\n \"GID\":2000,\r\n \"UID\":2000,\r\n \"_channels\":\"https://conda.anaconda.org/conda-canary/linux-64\r\n https://conda.anaconda.org/conda-canary/noarch\r\n https://repo.continuum.io/pkgs/free/linux-64\r\n https://repo.continuum.io/pkgs/free/noarch\r\n https://repo.continuum.io/pkgs/r/linux-64\r\n https://repo.continuum.io/pkgs/r/noarch\r\n https://repo.continuum.io/pkgs/pro/linux-64\r\n https://repo.continuum.io/pkgs/pro/noarch\",\r\n \"_config_files\":\"/home/travis/.condarc\",\r\n \"_envs_dirs\":\"/home/travis/miniconda/envs\r\n /home/travis/.conda/envs\",\r\n \"_pkgs_dirs\":\"/home/travis/miniconda/pkgs\r\n /home/travis/.conda/pkgs\",\r\n \"_rtwro\":\"writable\",\r\n \"active_prefix\":null,\r\n \"active_prefix_name\":null,\r\n \"channels\":[\r\n \"https://conda.anaconda.org/conda-canary/linux-64\",\r\n \"https://conda.anaconda.org/conda-canary/noarch\",\r\n \"https://repo.continuum.io/pkgs/free/linux-64\",\r\n \"https://repo.continuum.io/pkgs/free/noarch\",\r\n \"https://repo.continuum.io/pkgs/r/linux-64\",\r\n \"https://repo.continuum.io/pkgs/r/noarch\",\r\n \"https://repo.continuum.io/pkgs/pro/linux-64\",\r\n \"https://repo.continuum.io/pkgs/pro/noarch\"\r\n ],\r\n \"conda_build_version\":\"not installed\",\r\n \"conda_env_version\":\"4.4.0rc0\",\r\n \"conda_location\":\"/home/travis/miniconda/lib/python3.6/site-packages/conda\",\r\n \"conda_prefix\":\"/home/travis/miniconda\",\r\n \"conda_private\":false,\r\n \"conda_shlvl\":-1,\r\n \"conda_version\":\"4.4.0rc0\",\r\n \"config_files\":[\r\n \"/home/travis/.condarc\"\r\n 
],\r\n \"default_prefix\":\"/home/travis/miniconda\",\r\n \"envs\":[\r\n\r\n ],\r\n \"envs_dirs\":[\r\n \"/home/travis/miniconda/envs\",\r\n \"/home/travis/.conda/envs\"\r\n ],\r\n \"netrc_file\":null,\r\n \"offline\":false,\r\n \"pkgs_dirs\":[\r\n \"/home/travis/miniconda/pkgs\",\r\n \"/home/travis/.conda/pkgs\"\r\n ],\r\n \"platform\":\"linux-64\",\r\n \"python_version\":\"3.6.1.final.0\",\r\n \"rc_path\":\"/home/travis/.condarc\",\r\n \"requests_version\":\"2.14.2\",\r\n \"root_prefix\":\"/home/travis/miniconda\",\r\n \"root_writable\":true,\r\n \"sys_rc_path\":\"/home/travis/miniconda/.condarc\",\r\n \"user_agent\":\"conda/4.4.0rc0 requests/2.14.2 CPython/3.6.1 Linux/4.4.0-83-generic ubuntu/14.04 glibc/2.19\",\r\n \"user_rc_path\":\"/home/travis/.condarc\"\r\n },\r\n \"error\":\"TypeError(\\\"_pip_install_via_requirements() got an unexpected keyword argument 'prune'\\\",)\",\r\n \"exception_name\":\"TypeError\",\r\n \"exception_type\":\"<class 'TypeError'>\",\r\n \"traceback\":\"Traceback (most recent call last):\r\n File \\\"/home/travis/miniconda/lib/python3.6/site-packages/conda/exceptions.py\\\", line 653, in __call__\r\n return func(*args, **kwargs)\r\n File \\\"/home/travis/miniconda/lib/python3.6/site-packages/conda_env/cli/main_update.py\\\", line 107, in execute\r\n installer.install(prefix, specs, args, env, prune=args.prune)\r\nTypeError: _pip_install_via_requirements() got an unexpected keyword argument 'prune'\r\n\"\r\n}\r\n```\n", "before_files": [{"content": "from argparse import RawDescriptionHelpFormatter\nimport os\nimport sys\nimport textwrap\n\nfrom conda._vendor.auxlib.path import expand\nfrom conda.cli import install as cli_install\nfrom conda.cli.conda_argparse import add_parser_json, add_parser_prefix\nfrom conda.misc import touch_nonadmin\nfrom .common import get_prefix\nfrom .. 
import exceptions, specs as install_specs\nfrom ..exceptions import CondaEnvException\nfrom ..installers.base import InvalidInstaller, get_installer\n\ndescription = \"\"\"\nUpdate the current environment based on environment file\n\"\"\"\n\nexample = \"\"\"\nexamples:\n conda env update\n conda env update -n=foo\n conda env update -f=/path/to/environment.yml\n conda env update --name=foo --file=environment.yml\n conda env update vader/deathstar\n\"\"\"\n\n\ndef configure_parser(sub_parsers):\n p = sub_parsers.add_parser(\n 'update',\n formatter_class=RawDescriptionHelpFormatter,\n description=description,\n help=description,\n epilog=example,\n )\n add_parser_prefix(p)\n p.add_argument(\n '-f', '--file',\n action='store',\n help='environment definition (default: environment.yml)',\n default='environment.yml',\n )\n p.add_argument(\n '--prune',\n action='store_true',\n default=False,\n help='remove installed packages not defined in environment.yml',\n )\n p.add_argument(\n '-q', '--quiet',\n action='store_true',\n default=False,\n )\n p.add_argument(\n 'remote_definition',\n help='remote environment definition / IPython notebook',\n action='store',\n default=None,\n nargs='?'\n )\n add_parser_json(p)\n p.set_defaults(func=execute)\n\n\ndef execute(args, parser):\n name = args.remote_definition or args.name\n\n try:\n spec = install_specs.detect(name=name, filename=expand(args.file),\n directory=os.getcwd())\n env = spec.environment\n except exceptions.SpecNotFound:\n raise\n\n if not (args.name or args.prefix):\n if not env.name:\n # Note, this is a hack fofr get_prefix that assumes argparse results\n # TODO Refactor common.get_prefix\n name = os.environ.get('CONDA_DEFAULT_ENV', False)\n if not name:\n msg = \"Unable to determine environment\\n\\n\"\n msg += textwrap.dedent(\"\"\"\n Please re-run this command with one of the following options:\n\n * Provide an environment name via --name or -n\n * Re-run this command inside an activated conda environment.\"\"\").lstrip()\n # TODO Add json support\n raise CondaEnvException(msg)\n\n # Note: stubbing out the args object as all of the\n # conda.cli.common code thinks that name will always\n # be specified.\n args.name = env.name\n\n prefix = get_prefix(args, search=False)\n # CAN'T Check with this function since it assumes we will create prefix.\n # cli_install.check_prefix(prefix, json=args.json)\n\n # TODO, add capability\n # common.ensure_override_channels_requires_channel(args)\n # channel_urls = args.channel or ()\n\n for installer_type, specs in env.dependencies.items():\n try:\n installer = get_installer(installer_type)\n installer.install(prefix, specs, args, env, prune=args.prune)\n except InvalidInstaller:\n sys.stderr.write(textwrap.dedent(\"\"\"\n Unable to install package for {0}.\n\n Please double check and ensure you dependencies file has\n the correct spelling. 
You might also try installing the\n conda-env-{0} package to see if provides the required\n installer.\n \"\"\").lstrip().format(installer_type)\n )\n return -1\n\n touch_nonadmin(prefix)\n cli_install.print_activate(args.name if args.name else prefix)\n", "path": "conda_env/cli/main_update.py"}, {"content": "from __future__ import absolute_import\n\nfrom os.path import basename\n\nfrom conda._vendor.boltons.setutils import IndexedSet\nfrom conda.base.context import context\nfrom conda.core.solve import Solver\nfrom conda.models.channel import Channel, prioritize_channels\n\n\ndef install(prefix, specs, args, env, prune=False):\n # TODO: support all various ways this happens\n # Including 'nodefaults' in the channels list disables the defaults\n new_specs = []\n channel_urls = set()\n for elem in specs:\n if \"::\" in elem:\n channel_urls.add(elem.split(\"::\")[0])\n new_specs.append(elem.split(\"::\")[-1])\n else:\n new_specs.append(elem)\n specs = new_specs\n channel_urls = list(channel_urls)\n # TODO: support all various ways this happens\n # Including 'nodefaults' in the channels list disables the defaults\n channel_urls = channel_urls + [chan for chan in env.channels if chan != 'nodefaults']\n if 'nodefaults' not in env.channels:\n channel_urls.extend(context.channels)\n _channel_priority_map = prioritize_channels(channel_urls)\n\n channel_names = IndexedSet(Channel(url).canonical_name for url in _channel_priority_map)\n channels = IndexedSet(Channel(cn) for cn in channel_names)\n subdirs = IndexedSet(basename(url) for url in _channel_priority_map)\n\n solver = Solver(prefix, channels, subdirs, specs_to_add=specs)\n unlink_link_transaction = solver.solve_for_transaction(prune=prune)\n\n pfe = unlink_link_transaction.get_pfe()\n pfe.execute()\n unlink_link_transaction.execute()\n", "path": "conda_env/installers/conda.py"}, {"content": "from __future__ import absolute_import\n\nimport os\nimport os.path as op\nimport subprocess\nimport tempfile\nfrom conda_env.pip_util import pip_args\nfrom conda.exceptions import CondaValueError\n\n\ndef _pip_install_via_requirements(prefix, specs, args, *_):\n \"\"\"\n Installs the pip dependencies in specs using a temporary pip requirements file.\n\n Args\n ----\n prefix: string\n The path to the python and pip executables.\n\n specs: iterable of strings\n Each element should be a valid pip dependency.\n See: https://pip.pypa.io/en/stable/user_guide/#requirements-files\n https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format\n \"\"\"\n try:\n pip_workdir = op.dirname(op.abspath(args.file))\n except AttributeError:\n pip_workdir = None\n requirements = None\n try:\n # Generate the temporary requirements file\n requirements = tempfile.NamedTemporaryFile(mode='w',\n prefix='condaenv.',\n suffix='.requirements.txt',\n dir=pip_workdir,\n delete=False)\n requirements.write('\\n'.join(specs))\n requirements.close()\n # pip command line...\n pip_cmd = pip_args(prefix) + ['install', '-r', requirements.name]\n # ...run it\n process = subprocess.Popen(pip_cmd,\n cwd=pip_workdir,\n universal_newlines=True)\n process.communicate()\n if process.returncode != 0:\n raise CondaValueError(\"pip returned an error\")\n finally:\n # Win/Appveyor does not like it if we use context manager + delete=True.\n # So we delete the temporary file in a finally block.\n if requirements is not None and op.isfile(requirements.name):\n os.remove(requirements.name)\n\n\n# Conform to Installers API\ninstall = _pip_install_via_requirements\n", "path": 
"conda_env/installers/pip.py"}]}
| 3,549 | 475 |
gh_patches_debug_29348
|
rasdani/github-patches
|
git_diff
|
cookiecutter__cookiecutter-1569
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for click 8.0.0
### Description:
We should add support to click 8.0.0, there are some breaking changes like #1558
</issue>
<code>
[start of cookiecutter/prompt.py]
1 """Functions for prompting the user for project info."""
2 import json
3 from collections import OrderedDict
4
5 import click
6 from jinja2.exceptions import UndefinedError
7
8 from cookiecutter.environment import StrictEnvironment
9 from cookiecutter.exceptions import UndefinedVariableInTemplate
10
11
12 def read_user_variable(var_name, default_value):
13 """Prompt user for variable and return the entered value or given default.
14
15 :param str var_name: Variable of the context to query the user
16 :param default_value: Value that will be returned if no input happens
17 """
18 # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt
19 return click.prompt(var_name, default=default_value)
20
21
22 def read_user_yes_no(question, default_value):
23 """Prompt the user to reply with 'yes' or 'no' (or equivalent values).
24
25 Note:
26 Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'
27
28 :param str question: Question to the user
29 :param default_value: Value that will be returned if no input happens
30 """
31 # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt
32 return click.prompt(question, default=default_value, type=click.BOOL)
33
34
35 def read_repo_password(question):
36 """Prompt the user to enter a password.
37
38 :param str question: Question to the user
39 """
40 # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt
41 return click.prompt(question, hide_input=True)
42
43
44 def read_user_choice(var_name, options):
45 """Prompt the user to choose from several options for the given variable.
46
47 The first item will be returned if no input happens.
48
49 :param str var_name: Variable as specified in the context
50 :param list options: Sequence of options that are available to select from
51 :return: Exactly one item of ``options`` that has been chosen by the user
52 """
53 # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt
54 if not isinstance(options, list):
55 raise TypeError
56
57 if not options:
58 raise ValueError
59
60 choice_map = OrderedDict(
61 ('{}'.format(i), value) for i, value in enumerate(options, 1)
62 )
63 choices = choice_map.keys()
64 default = '1'
65
66 choice_lines = ['{} - {}'.format(*c) for c in choice_map.items()]
67 prompt = '\n'.join(
68 (
69 'Select {}:'.format(var_name),
70 '\n'.join(choice_lines),
71 'Choose from {}'.format(', '.join(choices)),
72 )
73 )
74
75 user_choice = click.prompt(
76 prompt, type=click.Choice(choices), default=default, show_choices=False
77 )
78 return choice_map[user_choice]
79
80
81 def process_json(user_value):
82 """Load user-supplied value as a JSON dict.
83
84 :param str user_value: User-supplied value to load as a JSON dict
85 """
86 try:
87 user_dict = json.loads(user_value, object_pairs_hook=OrderedDict)
88 except Exception:
89 # Leave it up to click to ask the user again
90 raise click.UsageError('Unable to decode to JSON.')
91
92 if not isinstance(user_dict, dict):
93 # Leave it up to click to ask the user again
94 raise click.UsageError('Requires JSON dict.')
95
96 return user_dict
97
98
99 def read_user_dict(var_name, default_value):
100 """Prompt the user to provide a dictionary of data.
101
102 :param str var_name: Variable as specified in the context
103 :param default_value: Value that will be returned if no input is provided
104 :return: A Python dictionary to use in the context.
105 """
106 # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt
107 if not isinstance(default_value, dict):
108 raise TypeError
109
110 default_display = 'default'
111
112 user_value = click.prompt(
113 var_name, default=default_display, type=click.STRING, value_proc=process_json
114 )
115
116 if user_value == default_display:
117 # Return the given default w/o any processing
118 return default_value
119 return user_value
120
121
122 def render_variable(env, raw, cookiecutter_dict):
123 """Render the next variable to be displayed in the user prompt.
124
125 Inside the prompting taken from the cookiecutter.json file, this renders
126 the next variable. For example, if a project_name is "Peanut Butter
127 Cookie", the repo_name could be be rendered with:
128
129 `{{ cookiecutter.project_name.replace(" ", "_") }}`.
130
131 This is then presented to the user as the default.
132
133 :param Environment env: A Jinja2 Environment object.
134 :param raw: The next value to be prompted for by the user.
135 :param dict cookiecutter_dict: The current context as it's gradually
136 being populated with variables.
137 :return: The rendered value for the default variable.
138 """
139 if raw is None:
140 return None
141 elif isinstance(raw, dict):
142 return {
143 render_variable(env, k, cookiecutter_dict): render_variable(
144 env, v, cookiecutter_dict
145 )
146 for k, v in raw.items()
147 }
148 elif isinstance(raw, list):
149 return [render_variable(env, v, cookiecutter_dict) for v in raw]
150 elif not isinstance(raw, str):
151 raw = str(raw)
152
153 template = env.from_string(raw)
154
155 rendered_template = template.render(cookiecutter=cookiecutter_dict)
156 return rendered_template
157
158
159 def prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):
160 """Prompt user with a set of options to choose from.
161
162 Each of the possible choices is rendered beforehand.
163 """
164 rendered_options = [render_variable(env, raw, cookiecutter_dict) for raw in options]
165
166 if no_input:
167 return rendered_options[0]
168 return read_user_choice(key, rendered_options)
169
170
171 def prompt_for_config(context, no_input=False):
172 """Prompt user to enter a new config.
173
174 :param dict context: Source for field names and sample values.
175 :param no_input: Prompt the user at command line for manual configuration?
176 """
177 cookiecutter_dict = OrderedDict([])
178 env = StrictEnvironment(context=context)
179
180 # First pass: Handle simple and raw variables, plus choices.
181 # These must be done first because the dictionaries keys and
182 # values might refer to them.
183 for key, raw in context['cookiecutter'].items():
184 if key.startswith('_') and not key.startswith('__'):
185 cookiecutter_dict[key] = raw
186 continue
187 elif key.startswith('__'):
188 cookiecutter_dict[key] = render_variable(env, raw, cookiecutter_dict)
189 continue
190
191 try:
192 if isinstance(raw, list):
193 # We are dealing with a choice variable
194 val = prompt_choice_for_config(
195 cookiecutter_dict, env, key, raw, no_input
196 )
197 cookiecutter_dict[key] = val
198 elif not isinstance(raw, dict):
199 # We are dealing with a regular variable
200 val = render_variable(env, raw, cookiecutter_dict)
201
202 if not no_input:
203 val = read_user_variable(key, val)
204
205 cookiecutter_dict[key] = val
206 except UndefinedError as err:
207 msg = "Unable to render variable '{}'".format(key)
208 raise UndefinedVariableInTemplate(msg, err, context)
209
210 # Second pass; handle the dictionaries.
211 for key, raw in context['cookiecutter'].items():
212 # Skip private type dicts not ot be rendered.
213 if key.startswith('_') and not key.startswith('__'):
214 continue
215
216 try:
217 if isinstance(raw, dict):
218 # We are dealing with a dict variable
219 val = render_variable(env, raw, cookiecutter_dict)
220
221 if not no_input and not key.startswith('__'):
222 val = read_user_dict(key, val)
223
224 cookiecutter_dict[key] = val
225 except UndefinedError as err:
226 msg = "Unable to render variable '{}'".format(key)
227 raise UndefinedVariableInTemplate(msg, err, context)
228
229 return cookiecutter_dict
230
[end of cookiecutter/prompt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py
--- a/cookiecutter/prompt.py
+++ b/cookiecutter/prompt.py
@@ -1,4 +1,5 @@
"""Functions for prompting the user for project info."""
+import functools
import json
from collections import OrderedDict
@@ -78,11 +79,18 @@
return choice_map[user_choice]
-def process_json(user_value):
+DEFAULT_DISPLAY = 'default'
+
+
+def process_json(user_value, default_value=None):
"""Load user-supplied value as a JSON dict.
:param str user_value: User-supplied value to load as a JSON dict
"""
+ if user_value == DEFAULT_DISPLAY:
+ # Return the given default w/o any processing
+ return default_value
+
try:
user_dict = json.loads(user_value, object_pairs_hook=OrderedDict)
except Exception:
@@ -107,15 +115,16 @@
if not isinstance(default_value, dict):
raise TypeError
- default_display = 'default'
-
user_value = click.prompt(
- var_name, default=default_display, type=click.STRING, value_proc=process_json
+ var_name,
+ default=DEFAULT_DISPLAY,
+ type=click.STRING,
+ value_proc=functools.partial(process_json, default_value=default_value),
)
- if user_value == default_display:
- # Return the given default w/o any processing
- return default_value
+ if click.__version__.startswith("7.") and user_value == DEFAULT_DISPLAY:
+ # click 7.x does not invoke value_proc on the default value.
+ return default_value # pragma: no cover
return user_value
|
{"golden_diff": "diff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py\n--- a/cookiecutter/prompt.py\n+++ b/cookiecutter/prompt.py\n@@ -1,4 +1,5 @@\n \"\"\"Functions for prompting the user for project info.\"\"\"\n+import functools\n import json\n from collections import OrderedDict\n \n@@ -78,11 +79,18 @@\n return choice_map[user_choice]\n \n \n-def process_json(user_value):\n+DEFAULT_DISPLAY = 'default'\n+\n+\n+def process_json(user_value, default_value=None):\n \"\"\"Load user-supplied value as a JSON dict.\n \n :param str user_value: User-supplied value to load as a JSON dict\n \"\"\"\n+ if user_value == DEFAULT_DISPLAY:\n+ # Return the given default w/o any processing\n+ return default_value\n+\n try:\n user_dict = json.loads(user_value, object_pairs_hook=OrderedDict)\n except Exception:\n@@ -107,15 +115,16 @@\n if not isinstance(default_value, dict):\n raise TypeError\n \n- default_display = 'default'\n-\n user_value = click.prompt(\n- var_name, default=default_display, type=click.STRING, value_proc=process_json\n+ var_name,\n+ default=DEFAULT_DISPLAY,\n+ type=click.STRING,\n+ value_proc=functools.partial(process_json, default_value=default_value),\n )\n \n- if user_value == default_display:\n- # Return the given default w/o any processing\n- return default_value\n+ if click.__version__.startswith(\"7.\") and user_value == DEFAULT_DISPLAY:\n+ # click 7.x does not invoke value_proc on the default value.\n+ return default_value # pragma: no cover\n return user_value\n", "issue": "Add support for click 8.0.0\n### Description:\r\n\r\nWe should add support to click 8.0.0, there are some breaking changes like #1558 \r\n\n", "before_files": [{"content": "\"\"\"Functions for prompting the user for project info.\"\"\"\nimport json\nfrom collections import OrderedDict\n\nimport click\nfrom jinja2.exceptions import UndefinedError\n\nfrom cookiecutter.environment import StrictEnvironment\nfrom cookiecutter.exceptions import UndefinedVariableInTemplate\n\n\ndef read_user_variable(var_name, default_value):\n \"\"\"Prompt user for variable and return the entered value or given default.\n\n :param str var_name: Variable of the context to query the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt\n return click.prompt(var_name, default=default_value)\n\n\ndef read_user_yes_no(question, default_value):\n \"\"\"Prompt the user to reply with 'yes' or 'no' (or equivalent values).\n\n Note:\n Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'\n\n :param str question: Question to the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt\n return click.prompt(question, default=default_value, type=click.BOOL)\n\n\ndef read_repo_password(question):\n \"\"\"Prompt the user to enter a password.\n\n :param str question: Question to the user\n \"\"\"\n # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt\n return click.prompt(question, hide_input=True)\n\n\ndef read_user_choice(var_name, options):\n \"\"\"Prompt the user to choose from several options for the given variable.\n\n The first item will be returned if no input happens.\n\n :param str var_name: Variable as specified in the context\n :param list options: Sequence of options that are available to select from\n :return: Exactly one item of ``options`` that has been chosen by the user\n \"\"\"\n # 
Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt\n if not isinstance(options, list):\n raise TypeError\n\n if not options:\n raise ValueError\n\n choice_map = OrderedDict(\n ('{}'.format(i), value) for i, value in enumerate(options, 1)\n )\n choices = choice_map.keys()\n default = '1'\n\n choice_lines = ['{} - {}'.format(*c) for c in choice_map.items()]\n prompt = '\\n'.join(\n (\n 'Select {}:'.format(var_name),\n '\\n'.join(choice_lines),\n 'Choose from {}'.format(', '.join(choices)),\n )\n )\n\n user_choice = click.prompt(\n prompt, type=click.Choice(choices), default=default, show_choices=False\n )\n return choice_map[user_choice]\n\n\ndef process_json(user_value):\n \"\"\"Load user-supplied value as a JSON dict.\n\n :param str user_value: User-supplied value to load as a JSON dict\n \"\"\"\n try:\n user_dict = json.loads(user_value, object_pairs_hook=OrderedDict)\n except Exception:\n # Leave it up to click to ask the user again\n raise click.UsageError('Unable to decode to JSON.')\n\n if not isinstance(user_dict, dict):\n # Leave it up to click to ask the user again\n raise click.UsageError('Requires JSON dict.')\n\n return user_dict\n\n\ndef read_user_dict(var_name, default_value):\n \"\"\"Prompt the user to provide a dictionary of data.\n\n :param str var_name: Variable as specified in the context\n :param default_value: Value that will be returned if no input is provided\n :return: A Python dictionary to use in the context.\n \"\"\"\n # Please see https://click.palletsprojects.com/en/7.x/api/#click.prompt\n if not isinstance(default_value, dict):\n raise TypeError\n\n default_display = 'default'\n\n user_value = click.prompt(\n var_name, default=default_display, type=click.STRING, value_proc=process_json\n )\n\n if user_value == default_display:\n # Return the given default w/o any processing\n return default_value\n return user_value\n\n\ndef render_variable(env, raw, cookiecutter_dict):\n \"\"\"Render the next variable to be displayed in the user prompt.\n\n Inside the prompting taken from the cookiecutter.json file, this renders\n the next variable. 
For example, if a project_name is \"Peanut Butter\n Cookie\", the repo_name could be be rendered with:\n\n `{{ cookiecutter.project_name.replace(\" \", \"_\") }}`.\n\n This is then presented to the user as the default.\n\n :param Environment env: A Jinja2 Environment object.\n :param raw: The next value to be prompted for by the user.\n :param dict cookiecutter_dict: The current context as it's gradually\n being populated with variables.\n :return: The rendered value for the default variable.\n \"\"\"\n if raw is None:\n return None\n elif isinstance(raw, dict):\n return {\n render_variable(env, k, cookiecutter_dict): render_variable(\n env, v, cookiecutter_dict\n )\n for k, v in raw.items()\n }\n elif isinstance(raw, list):\n return [render_variable(env, v, cookiecutter_dict) for v in raw]\n elif not isinstance(raw, str):\n raw = str(raw)\n\n template = env.from_string(raw)\n\n rendered_template = template.render(cookiecutter=cookiecutter_dict)\n return rendered_template\n\n\ndef prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):\n \"\"\"Prompt user with a set of options to choose from.\n\n Each of the possible choices is rendered beforehand.\n \"\"\"\n rendered_options = [render_variable(env, raw, cookiecutter_dict) for raw in options]\n\n if no_input:\n return rendered_options[0]\n return read_user_choice(key, rendered_options)\n\n\ndef prompt_for_config(context, no_input=False):\n \"\"\"Prompt user to enter a new config.\n\n :param dict context: Source for field names and sample values.\n :param no_input: Prompt the user at command line for manual configuration?\n \"\"\"\n cookiecutter_dict = OrderedDict([])\n env = StrictEnvironment(context=context)\n\n # First pass: Handle simple and raw variables, plus choices.\n # These must be done first because the dictionaries keys and\n # values might refer to them.\n for key, raw in context['cookiecutter'].items():\n if key.startswith('_') and not key.startswith('__'):\n cookiecutter_dict[key] = raw\n continue\n elif key.startswith('__'):\n cookiecutter_dict[key] = render_variable(env, raw, cookiecutter_dict)\n continue\n\n try:\n if isinstance(raw, list):\n # We are dealing with a choice variable\n val = prompt_choice_for_config(\n cookiecutter_dict, env, key, raw, no_input\n )\n cookiecutter_dict[key] = val\n elif not isinstance(raw, dict):\n # We are dealing with a regular variable\n val = render_variable(env, raw, cookiecutter_dict)\n\n if not no_input:\n val = read_user_variable(key, val)\n\n cookiecutter_dict[key] = val\n except UndefinedError as err:\n msg = \"Unable to render variable '{}'\".format(key)\n raise UndefinedVariableInTemplate(msg, err, context)\n\n # Second pass; handle the dictionaries.\n for key, raw in context['cookiecutter'].items():\n # Skip private type dicts not ot be rendered.\n if key.startswith('_') and not key.startswith('__'):\n continue\n\n try:\n if isinstance(raw, dict):\n # We are dealing with a dict variable\n val = render_variable(env, raw, cookiecutter_dict)\n\n if not no_input and not key.startswith('__'):\n val = read_user_dict(key, val)\n\n cookiecutter_dict[key] = val\n except UndefinedError as err:\n msg = \"Unable to render variable '{}'\".format(key)\n raise UndefinedVariableInTemplate(msg, err, context)\n\n return cookiecutter_dict\n", "path": "cookiecutter/prompt.py"}]}
| 2,943 | 387 |
gh_patches_debug_12465
|
rasdani/github-patches
|
git_diff
|
deis__deis-289
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Refresh EC2 AMIs for 0.2.0 release
Having the AMIs up-to-date speeds node scaling time somewhat, and it's a slow but mostly-automated process so let's do this each release.
(Ditto for Rackspace, Vagrant .box images on S3, and DigitalOcean when we get there for each of them...)
</issue>
<code>
[start of provider/ec2.py]
1 """
2 Deis cloud provider implementation for Amazon EC2.
3 """
4
5 from __future__ import unicode_literals
6
7 import json
8 import time
9
10 from boto import ec2
11 from boto.exception import EC2ResponseError
12
13 # from api.ssh import connect_ssh, exec_ssh
14 from deis import settings
15
16
17 # Deis-optimized EC2 amis -- with 3.8 kernel, chef 11 deps,
18 # and large docker images (e.g. buildstep) pre-installed
19 IMAGE_MAP = {
20 'ap-northeast-1': 'ami-0b45de0a',
21 'ap-southeast-1': 'ami-eaf9b3b8',
22 'ap-southeast-2': 'ami-e9970bd3',
23 'eu-west-1': 'ami-c021c1b7',
24 'sa-east-1': 'ami-b5da7ca8',
25 'us-east-1': 'ami-a30b57ca',
26 'us-west-1': 'ami-30e3d475',
27 'us-west-2': 'ami-ca63fafa',
28 }
29
30
31 def seed_flavors():
32 """Seed the database with default flavors for each EC2 region.
33
34 :rtype: list of dicts containing flavor data
35 """
36 flavors = []
37 for r in ('us-east-1', 'us-west-1', 'us-west-2', 'eu-west-1',
38 'ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2',
39 'sa-east-1'):
40 flavors.append({'id': 'ec2-{}'.format(r),
41 'provider': 'ec2',
42 'params': json.dumps({
43 'region': r,
44 'image': IMAGE_MAP[r],
45 'zone': 'any',
46 'size': 'm1.medium'})})
47 return flavors
48
49
50 def build_layer(layer):
51 """
52 Build a layer.
53
54 :param layer: a dict containing formation, id, params, and creds info
55 """
56 region = layer['params'].get('region', 'us-east-1')
57 conn = _create_ec2_connection(layer['creds'], region)
58 # create a new sg and authorize all ports
59 # use iptables on the host to firewall ports
60 name = "{formation}-{id}".format(**layer)
61 sg = conn.create_security_group(name, 'Created by Deis')
62 # import a new keypair using the layer key material
63 conn.import_key_pair(name, layer['ssh_public_key'])
64 # loop until the sg is *actually* there
65 for i in xrange(10):
66 try:
67 sg.authorize(ip_protocol='tcp', from_port=1, to_port=65535,
68 cidr_ip='0.0.0.0/0')
69 break
70 except EC2ResponseError:
71 if i < 10:
72 time.sleep(1.5)
73 continue
74 else:
75 raise RuntimeError('Failed to authorize security group')
76
77
78 def destroy_layer(layer):
79 """
80 Destroy a layer.
81
82 :param layer: a dict containing formation, id, params, and creds info
83 """
84 region = layer['params'].get('region', 'us-east-1')
85 name = "{formation}-{id}".format(**layer)
86 conn = _create_ec2_connection(layer['creds'], region)
87 conn.delete_key_pair(name)
88 # there's an ec2 race condition on instances terminating
89 # successfully but still holding a lock on the security group
90 # let's take a nap
91 time.sleep(5)
92 try:
93 conn.delete_security_group(name)
94 except EC2ResponseError as e:
95 if e.code != 'InvalidGroup.NotFound':
96 raise e
97
98
99 def build_node(node):
100 """
101 Build a node.
102
103 :param node: a dict containing formation, layer, params, and creds info.
104 :rtype: a tuple of (provider_id, fully_qualified_domain_name, metadata)
105 """
106 params, creds = node['params'], node['creds']
107 region = params.setdefault('region', 'us-east-1')
108 conn = _create_ec2_connection(creds, region)
109 name = "{formation}-{layer}".format(**node)
110 params['key_name'] = name
111 sg = conn.get_all_security_groups(name)[0]
112 params.setdefault('security_groups', []).append(sg.name)
113 image_id = params.get(
114 'image', getattr(settings, 'IMAGE_MAP', IMAGE_MAP)[region])
115 images = conn.get_all_images([image_id])
116 if len(images) != 1:
117 raise LookupError('Could not find AMI: %s' % image_id)
118 image = images[0]
119 kwargs = _prepare_run_kwargs(params)
120 reservation = image.run(**kwargs)
121 instances = reservation.instances
122 boto = instances[0]
123 # sleep before tagging
124 time.sleep(10)
125 boto.update()
126 boto.add_tag('Name', node['id'])
127 # loop until running
128 while(True):
129 time.sleep(2)
130 boto.update()
131 if boto.state == 'running':
132 break
133 # prepare return values
134 provider_id = boto.id
135 fqdn = boto.public_dns_name
136 metadata = _format_metadata(boto)
137 return provider_id, fqdn, metadata
138
139
140 def destroy_node(node):
141 """
142 Destroy a node.
143
144 :param node: a dict containing a node's provider_id, params, and creds
145 """
146 provider_id = node['provider_id']
147 region = node['params'].get('region', 'us-east-1')
148 conn = _create_ec2_connection(node['creds'], region)
149 if provider_id:
150 conn.terminate_instances([provider_id])
151 i = conn.get_all_instances([provider_id])[0].instances[0]
152 while(True):
153 time.sleep(2)
154 i.update()
155 if i.state == "terminated":
156 break
157
158
159 def _create_ec2_connection(creds, region):
160 """
161 Connect to an EC2 region with the given credentials.
162
163 :param creds: a dict containing an EC2 access_key and secret_key
164 :region: the name of an EC2 region, such as "us-west-2"
165 :rtype: a connected :class:`~boto.ec2.connection.EC2Connection`
166 :raises EnvironmentError: if no credentials are provided
167 """
168 if not creds:
169 raise EnvironmentError('No credentials provided')
170 return ec2.connect_to_region(region,
171 aws_access_key_id=creds['access_key'],
172 aws_secret_access_key=creds['secret_key'])
173
174
175 def _prepare_run_kwargs(params):
176 # start with sane defaults
177 kwargs = {
178 'min_count': 1, 'max_count': 1,
179 'user_data': None, 'addressing_type': None,
180 'instance_type': None, 'placement': None,
181 'kernel_id': None, 'ramdisk_id': None,
182 'monitoring_enabled': False, 'subnet_id': None,
183 'block_device_map': None,
184 }
185 # convert zone "any" to NoneType
186 requested_zone = params.get('zone')
187 if requested_zone and requested_zone.lower() == 'any':
188 requested_zone = None
189 # lookup kwargs from params
190 param_kwargs = {
191 'instance_type': params.get('size', 'm1.medium'),
192 'security_groups': params['security_groups'],
193 'placement': requested_zone,
194 'key_name': params['key_name'],
195 'kernel_id': params.get('kernel', None),
196 }
197 # add user_data if provided in params
198 user_data = params.get('user_data')
199 if user_data:
200 kwargs.update({'user_data': user_data})
201 # params override defaults
202 kwargs.update(param_kwargs)
203 return kwargs
204
205
206 def _format_metadata(boto):
207 return {
208 'architecture': boto.architecture,
209 'block_device_mapping': {
210 k: v.volume_id for k, v in boto.block_device_mapping.items()
211 },
212 'client_token': boto.client_token,
213 'dns_name': boto.dns_name,
214 'ebs_optimized': boto.ebs_optimized,
215 'eventsSet': boto.eventsSet,
216 'group_name': boto.group_name,
217 'groups': [g.id for g in boto.groups],
218 'hypervisor': boto.hypervisor,
219 'id': boto.id,
220 'image_id': boto.image_id,
221 'instance_profile': boto.instance_profile,
222 'instance_type': boto.instance_type,
223 'interfaces': list(boto.interfaces),
224 'ip_address': boto.ip_address,
225 'kernel': boto.kernel,
226 'key_name': boto.key_name,
227 'launch_time': boto.launch_time,
228 'monitored': boto.monitored,
229 'monitoring_state': boto.monitoring_state,
230 'persistent': boto.persistent,
231 'placement': boto.placement,
232 'placement_group': boto.placement_group,
233 'placement_tenancy': boto.placement_tenancy,
234 'previous_state': boto.previous_state,
235 'private_dns_name': boto.private_dns_name,
236 'private_ip_address': boto.private_ip_address,
237 'public_dns_name': boto.public_dns_name,
238 'ramdisk': boto.ramdisk,
239 'region': boto.region.name,
240 'root_device_name': boto.root_device_name,
241 'root_device_type': boto.root_device_type,
242 'spot_instance_request_id': boto.spot_instance_request_id,
243 'state': boto.state,
244 'state_code': boto.state_code,
245 'state_reason': boto.state_reason,
246 'subnet_id': boto.subnet_id,
247 'tags': dict(boto.tags),
248 'virtualization_type': boto.virtualization_type,
249 'vpc_id': boto.vpc_id,
250 }
251
[end of provider/ec2.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/provider/ec2.py b/provider/ec2.py
--- a/provider/ec2.py
+++ b/provider/ec2.py
@@ -17,14 +17,14 @@
# Deis-optimized EC2 amis -- with 3.8 kernel, chef 11 deps,
# and large docker images (e.g. buildstep) pre-installed
IMAGE_MAP = {
- 'ap-northeast-1': 'ami-0b45de0a',
- 'ap-southeast-1': 'ami-eaf9b3b8',
- 'ap-southeast-2': 'ami-e9970bd3',
- 'eu-west-1': 'ami-c021c1b7',
- 'sa-east-1': 'ami-b5da7ca8',
- 'us-east-1': 'ami-a30b57ca',
- 'us-west-1': 'ami-30e3d475',
- 'us-west-2': 'ami-ca63fafa',
+ 'ap-northeast-1': 'ami-39bfda38',
+ 'ap-southeast-1': 'ami-c0613492',
+ 'ap-southeast-2': 'ami-9741ddad',
+ 'eu-west-1': 'ami-39bc5e4e',
+ 'sa-east-1': 'ami-0775d31a',
+ 'us-east-1': 'ami-fa99c193',
+ 'us-west-1': 'ami-802412c5',
+ 'us-west-2': 'ami-0e7be33e',
}
|
{"golden_diff": "diff --git a/provider/ec2.py b/provider/ec2.py\n--- a/provider/ec2.py\n+++ b/provider/ec2.py\n@@ -17,14 +17,14 @@\n # Deis-optimized EC2 amis -- with 3.8 kernel, chef 11 deps,\n # and large docker images (e.g. buildstep) pre-installed\n IMAGE_MAP = {\n- 'ap-northeast-1': 'ami-0b45de0a',\n- 'ap-southeast-1': 'ami-eaf9b3b8',\n- 'ap-southeast-2': 'ami-e9970bd3',\n- 'eu-west-1': 'ami-c021c1b7',\n- 'sa-east-1': 'ami-b5da7ca8',\n- 'us-east-1': 'ami-a30b57ca',\n- 'us-west-1': 'ami-30e3d475',\n- 'us-west-2': 'ami-ca63fafa',\n+ 'ap-northeast-1': 'ami-39bfda38',\n+ 'ap-southeast-1': 'ami-c0613492',\n+ 'ap-southeast-2': 'ami-9741ddad',\n+ 'eu-west-1': 'ami-39bc5e4e',\n+ 'sa-east-1': 'ami-0775d31a',\n+ 'us-east-1': 'ami-fa99c193',\n+ 'us-west-1': 'ami-802412c5',\n+ 'us-west-2': 'ami-0e7be33e',\n }\n", "issue": "Refresh EC2 AMIs for 0.2.0 release\nHaving the AMIs up-to-date speeds node scaling time somewhat, and it's a slow but mostly-automated process so let's do this each release.\n\n(Ditto for Rackspace, Vagrant .box images on S3, and DigitalOcean when we get there for each of them...)\n\n", "before_files": [{"content": "\"\"\"\nDeis cloud provider implementation for Amazon EC2.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport json\nimport time\n\nfrom boto import ec2\nfrom boto.exception import EC2ResponseError\n\n# from api.ssh import connect_ssh, exec_ssh\nfrom deis import settings\n\n\n# Deis-optimized EC2 amis -- with 3.8 kernel, chef 11 deps,\n# and large docker images (e.g. buildstep) pre-installed\nIMAGE_MAP = {\n 'ap-northeast-1': 'ami-0b45de0a',\n 'ap-southeast-1': 'ami-eaf9b3b8',\n 'ap-southeast-2': 'ami-e9970bd3',\n 'eu-west-1': 'ami-c021c1b7',\n 'sa-east-1': 'ami-b5da7ca8',\n 'us-east-1': 'ami-a30b57ca',\n 'us-west-1': 'ami-30e3d475',\n 'us-west-2': 'ami-ca63fafa',\n}\n\n\ndef seed_flavors():\n \"\"\"Seed the database with default flavors for each EC2 region.\n\n :rtype: list of dicts containing flavor data\n \"\"\"\n flavors = []\n for r in ('us-east-1', 'us-west-1', 'us-west-2', 'eu-west-1',\n 'ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2',\n 'sa-east-1'):\n flavors.append({'id': 'ec2-{}'.format(r),\n 'provider': 'ec2',\n 'params': json.dumps({\n 'region': r,\n 'image': IMAGE_MAP[r],\n 'zone': 'any',\n 'size': 'm1.medium'})})\n return flavors\n\n\ndef build_layer(layer):\n \"\"\"\n Build a layer.\n\n :param layer: a dict containing formation, id, params, and creds info\n \"\"\"\n region = layer['params'].get('region', 'us-east-1')\n conn = _create_ec2_connection(layer['creds'], region)\n # create a new sg and authorize all ports\n # use iptables on the host to firewall ports\n name = \"{formation}-{id}\".format(**layer)\n sg = conn.create_security_group(name, 'Created by Deis')\n # import a new keypair using the layer key material\n conn.import_key_pair(name, layer['ssh_public_key'])\n # loop until the sg is *actually* there\n for i in xrange(10):\n try:\n sg.authorize(ip_protocol='tcp', from_port=1, to_port=65535,\n cidr_ip='0.0.0.0/0')\n break\n except EC2ResponseError:\n if i < 10:\n time.sleep(1.5)\n continue\n else:\n raise RuntimeError('Failed to authorize security group')\n\n\ndef destroy_layer(layer):\n \"\"\"\n Destroy a layer.\n\n :param layer: a dict containing formation, id, params, and creds info\n \"\"\"\n region = layer['params'].get('region', 'us-east-1')\n name = \"{formation}-{id}\".format(**layer)\n conn = _create_ec2_connection(layer['creds'], region)\n conn.delete_key_pair(name)\n # there's an ec2 race condition on instances 
terminating\n # successfully but still holding a lock on the security group\n # let's take a nap\n time.sleep(5)\n try:\n conn.delete_security_group(name)\n except EC2ResponseError as e:\n if e.code != 'InvalidGroup.NotFound':\n raise e\n\n\ndef build_node(node):\n \"\"\"\n Build a node.\n\n :param node: a dict containing formation, layer, params, and creds info.\n :rtype: a tuple of (provider_id, fully_qualified_domain_name, metadata)\n \"\"\"\n params, creds = node['params'], node['creds']\n region = params.setdefault('region', 'us-east-1')\n conn = _create_ec2_connection(creds, region)\n name = \"{formation}-{layer}\".format(**node)\n params['key_name'] = name\n sg = conn.get_all_security_groups(name)[0]\n params.setdefault('security_groups', []).append(sg.name)\n image_id = params.get(\n 'image', getattr(settings, 'IMAGE_MAP', IMAGE_MAP)[region])\n images = conn.get_all_images([image_id])\n if len(images) != 1:\n raise LookupError('Could not find AMI: %s' % image_id)\n image = images[0]\n kwargs = _prepare_run_kwargs(params)\n reservation = image.run(**kwargs)\n instances = reservation.instances\n boto = instances[0]\n # sleep before tagging\n time.sleep(10)\n boto.update()\n boto.add_tag('Name', node['id'])\n # loop until running\n while(True):\n time.sleep(2)\n boto.update()\n if boto.state == 'running':\n break\n # prepare return values\n provider_id = boto.id\n fqdn = boto.public_dns_name\n metadata = _format_metadata(boto)\n return provider_id, fqdn, metadata\n\n\ndef destroy_node(node):\n \"\"\"\n Destroy a node.\n\n :param node: a dict containing a node's provider_id, params, and creds\n \"\"\"\n provider_id = node['provider_id']\n region = node['params'].get('region', 'us-east-1')\n conn = _create_ec2_connection(node['creds'], region)\n if provider_id:\n conn.terminate_instances([provider_id])\n i = conn.get_all_instances([provider_id])[0].instances[0]\n while(True):\n time.sleep(2)\n i.update()\n if i.state == \"terminated\":\n break\n\n\ndef _create_ec2_connection(creds, region):\n \"\"\"\n Connect to an EC2 region with the given credentials.\n\n :param creds: a dict containing an EC2 access_key and secret_key\n :region: the name of an EC2 region, such as \"us-west-2\"\n :rtype: a connected :class:`~boto.ec2.connection.EC2Connection`\n :raises EnvironmentError: if no credentials are provided\n \"\"\"\n if not creds:\n raise EnvironmentError('No credentials provided')\n return ec2.connect_to_region(region,\n aws_access_key_id=creds['access_key'],\n aws_secret_access_key=creds['secret_key'])\n\n\ndef _prepare_run_kwargs(params):\n # start with sane defaults\n kwargs = {\n 'min_count': 1, 'max_count': 1,\n 'user_data': None, 'addressing_type': None,\n 'instance_type': None, 'placement': None,\n 'kernel_id': None, 'ramdisk_id': None,\n 'monitoring_enabled': False, 'subnet_id': None,\n 'block_device_map': None,\n }\n # convert zone \"any\" to NoneType\n requested_zone = params.get('zone')\n if requested_zone and requested_zone.lower() == 'any':\n requested_zone = None\n # lookup kwargs from params\n param_kwargs = {\n 'instance_type': params.get('size', 'm1.medium'),\n 'security_groups': params['security_groups'],\n 'placement': requested_zone,\n 'key_name': params['key_name'],\n 'kernel_id': params.get('kernel', None),\n }\n # add user_data if provided in params\n user_data = params.get('user_data')\n if user_data:\n kwargs.update({'user_data': user_data})\n # params override defaults\n kwargs.update(param_kwargs)\n return kwargs\n\n\ndef _format_metadata(boto):\n return {\n 
'architecture': boto.architecture,\n 'block_device_mapping': {\n k: v.volume_id for k, v in boto.block_device_mapping.items()\n },\n 'client_token': boto.client_token,\n 'dns_name': boto.dns_name,\n 'ebs_optimized': boto.ebs_optimized,\n 'eventsSet': boto.eventsSet,\n 'group_name': boto.group_name,\n 'groups': [g.id for g in boto.groups],\n 'hypervisor': boto.hypervisor,\n 'id': boto.id,\n 'image_id': boto.image_id,\n 'instance_profile': boto.instance_profile,\n 'instance_type': boto.instance_type,\n 'interfaces': list(boto.interfaces),\n 'ip_address': boto.ip_address,\n 'kernel': boto.kernel,\n 'key_name': boto.key_name,\n 'launch_time': boto.launch_time,\n 'monitored': boto.monitored,\n 'monitoring_state': boto.monitoring_state,\n 'persistent': boto.persistent,\n 'placement': boto.placement,\n 'placement_group': boto.placement_group,\n 'placement_tenancy': boto.placement_tenancy,\n 'previous_state': boto.previous_state,\n 'private_dns_name': boto.private_dns_name,\n 'private_ip_address': boto.private_ip_address,\n 'public_dns_name': boto.public_dns_name,\n 'ramdisk': boto.ramdisk,\n 'region': boto.region.name,\n 'root_device_name': boto.root_device_name,\n 'root_device_type': boto.root_device_type,\n 'spot_instance_request_id': boto.spot_instance_request_id,\n 'state': boto.state,\n 'state_code': boto.state_code,\n 'state_reason': boto.state_reason,\n 'subnet_id': boto.subnet_id,\n 'tags': dict(boto.tags),\n 'virtualization_type': boto.virtualization_type,\n 'vpc_id': boto.vpc_id,\n }\n", "path": "provider/ec2.py"}]}
| 3,357 | 394 |
gh_patches_debug_8039
|
rasdani/github-patches
|
git_diff
|
docker__docker-py-1399
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
docker-py installation breaks docker-compose
I'm not quite sure if this is correct, but installing `docker-py` through pip after I've installed `docker-compose` breaks `docker-compose` with:
```
Traceback (most recent call last):
File "/usr/local/bin/docker-compose", line 7, in <module>
from compose.cli.main import main
File "/usr/local/lib/python2.7/site-packages/compose/cli/main.py", line 20, in <module>
from ..bundle import get_image_digests
File "/usr/local/lib/python2.7/site-packages/compose/bundle.py", line 13, in <module>
from .network import get_network_defs_for_service
File "/usr/local/lib/python2.7/site-packages/compose/network.py", line 7, in <module>
from docker.types import IPAMConfig
ImportError: cannot import name IPAMConfig
```
To fix that error, I just need to do the installations in this order:
```
pip install docker-py
pip install docker-compose
```
Gist:
https://gist.github.com/serialdoom/3a443c420aa29f9422f8c5fc73f46602
Python/pip versions tried:
```
docker run -it python:2.7.13 bash -c 'pip --version'
pip 9.0.1 from /usr/local/lib/python2.7/site-packages (python 2.7)
docker run -it python:2.7.12 bash -c 'pip --version'
pip 8.1.2 from /usr/local/lib/python2.7/site-packages (python 2.7)
```
</issue>
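For context on why the install order matters: the old `docker-py` distribution and its successor `docker` both unpack into the same `docker/` package directory, so installing one on top of the other leaves a partially overwritten package behind, which is what the `ImportError` above reflects. The sketch below is a hypothetical pre-flight check, not part of docker-py or docker-compose; it only assumes `pkg_resources` from setuptools is importable.

```python
# check_docker_conflict.py -- hypothetical helper, not shipped with either project
import sys

import pkg_resources  # provided by setuptools


def conflicting_docker_distributions():
    """Return the installed distributions that provide the 'docker' package."""
    installed = {dist.project_name.lower() for dist in pkg_resources.working_set}
    # 'docker-py' (old name) and 'docker' (new name) share the same import
    # package, so having both installed means one has clobbered the other.
    return sorted(installed & {"docker", "docker-py"})


if __name__ == "__main__":
    found = conflicting_docker_distributions()
    if len(found) > 1:
        print("Conflicting installs detected: " + ", ".join(found))
        print("Consider: pip uninstall -y docker docker-py, then reinstall docker-compose.")
        sys.exit(1)
    print("No docker / docker-py conflict detected.")
```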
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import codecs
3 import os
4 import sys
5
6 from setuptools import setup, find_packages
7
8
9 ROOT_DIR = os.path.dirname(__file__)
10 SOURCE_DIR = os.path.join(ROOT_DIR)
11
12 requirements = [
13 'requests >= 2.5.2, != 2.11.0, != 2.12.2',
14 'six >= 1.4.0',
15 'websocket-client >= 0.32.0',
16 'docker-pycreds >= 0.2.1'
17 ]
18
19 if sys.platform == 'win32':
20 requirements.append('pypiwin32 >= 219')
21
22 extras_require = {
23 ':python_version < "3.5"': 'backports.ssl_match_hostname >= 3.5',
24 # While not imported explicitly, the ipaddress module is required for
25 # ssl_match_hostname to verify hosts match with certificates via
26 # ServerAltname: https://pypi.python.org/pypi/backports.ssl_match_hostname
27 ':python_version < "3.3"': 'ipaddress >= 1.0.16',
28 }
29
30 version = None
31 exec(open('docker/version.py').read())
32
33 with open('./test-requirements.txt') as test_reqs_txt:
34 test_requirements = [line for line in test_reqs_txt]
35
36
37 long_description = ''
38 try:
39 with codecs.open('./README.rst', encoding='utf-8') as readme_rst:
40 long_description = readme_rst.read()
41 except IOError:
42 # README.rst is only generated on release. Its absence should not prevent
43 # setup.py from working properly.
44 pass
45
46 setup(
47 name="docker",
48 version=version,
49 description="A Python library for the Docker Engine API.",
50 long_description=long_description,
51 url='https://github.com/docker/docker-py',
52 packages=find_packages(exclude=["tests.*", "tests"]),
53 install_requires=requirements,
54 tests_require=test_requirements,
55 extras_require=extras_require,
56 zip_safe=False,
57 test_suite='tests',
58 classifiers=[
59 'Development Status :: 5 - Production/Stable',
60 'Environment :: Other Environment',
61 'Intended Audience :: Developers',
62 'Operating System :: OS Independent',
63 'Programming Language :: Python',
64 'Programming Language :: Python :: 2',
65 'Programming Language :: Python :: 2.7',
66 'Programming Language :: Python :: 3',
67 'Programming Language :: Python :: 3.3',
68 'Programming Language :: Python :: 3.4',
69 'Programming Language :: Python :: 3.5',
70 'Topic :: Utilities',
71 'License :: OSI Approved :: Apache Software License',
72 ],
73 maintainer='Joffrey F',
74 maintainer_email='[email protected]',
75 )
76
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,10 +1,20 @@
#!/usr/bin/env python
+from __future__ import print_function
+
import codecs
import os
import sys
+import pip
+
from setuptools import setup, find_packages
+if 'docker-py' in [x.project_name for x in pip.get_installed_distributions()]:
+ print(
+ 'ERROR: "docker-py" needs to be uninstalled before installing this'
+ ' package:\npip uninstall docker-py', file=sys.stderr
+ )
+ sys.exit(1)
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,10 +1,20 @@\n #!/usr/bin/env python\n+from __future__ import print_function\n+\n import codecs\n import os\n import sys\n \n+import pip\n+\n from setuptools import setup, find_packages\n \n+if 'docker-py' in [x.project_name for x in pip.get_installed_distributions()]:\n+ print(\n+ 'ERROR: \"docker-py\" needs to be uninstalled before installing this'\n+ ' package:\\npip uninstall docker-py', file=sys.stderr\n+ )\n+ sys.exit(1)\n \n ROOT_DIR = os.path.dirname(__file__)\n SOURCE_DIR = os.path.join(ROOT_DIR)\n", "issue": "docker-py installation breaks docker-compose\nim not quite sure if this is correct, but trying to install `docker-py` through pip after i've installed `docker-compose` breaks `docker-compose` with\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/docker-compose\", line 7, in <module>\r\n from compose.cli.main import main\r\n File \"/usr/local/lib/python2.7/site-packages/compose/cli/main.py\", line 20, in <module>\r\n from ..bundle import get_image_digests\r\n File \"/usr/local/lib/python2.7/site-packages/compose/bundle.py\", line 13, in <module>\r\n from .network import get_network_defs_for_service\r\n File \"/usr/local/lib/python2.7/site-packages/compose/network.py\", line 7, in <module>\r\n from docker.types import IPAMConfig\r\nImportError: cannot import name IPAMConfig\r\n```\r\n\r\nTo fix that error, i just need to do the installations in this order:\r\n```\r\npip install docker-py\r\npip install docker-compose\r\n```\r\n\r\n\r\ngist:\r\nhttps://gist.github.com/serialdoom/3a443c420aa29f9422f8c5fc73f46602\r\n\r\npython/pip versions tried:\r\n```\r\ndocker run -it python:2.7.13 bash -c 'pip --version'\r\npip 9.0.1 from /usr/local/lib/python2.7/site-packages (python 2.7)\r\ndocker run -it python:2.7.12 bash -c 'pip --version'\r\npip 8.1.2 from /usr/local/lib/python2.7/site-packages (python 2.7)\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nrequirements = [\n 'requests >= 2.5.2, != 2.11.0, != 2.12.2',\n 'six >= 1.4.0',\n 'websocket-client >= 0.32.0',\n 'docker-pycreds >= 0.2.1'\n]\n\nif sys.platform == 'win32':\n requirements.append('pypiwin32 >= 219')\n\nextras_require = {\n ':python_version < \"3.5\"': 'backports.ssl_match_hostname >= 3.5',\n # While not imported explicitly, the ipaddress module is required for\n # ssl_match_hostname to verify hosts match with certificates via\n # ServerAltname: https://pypi.python.org/pypi/backports.ssl_match_hostname\n ':python_version < \"3.3\"': 'ipaddress >= 1.0.16',\n}\n\nversion = None\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n\n\nlong_description = ''\ntry:\n with codecs.open('./README.rst', encoding='utf-8') as readme_rst:\n long_description = readme_rst.read()\nexcept IOError:\n # README.rst is only generated on release. 
Its absence should not prevent\n # setup.py from working properly.\n pass\n\nsetup(\n name=\"docker\",\n version=version,\n description=\"A Python library for the Docker Engine API.\",\n long_description=long_description,\n url='https://github.com/docker/docker-py',\n packages=find_packages(exclude=[\"tests.*\", \"tests\"]),\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require=extras_require,\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n maintainer='Joffrey F',\n maintainer_email='[email protected]',\n)\n", "path": "setup.py"}]}
| 1,639 | 161 |
gh_patches_debug_12238
|
rasdani/github-patches
|
git_diff
|
google__fuzzbench-1604
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
trials not launching in local experiment
I'm running into an issue running local experiments: the dispatcher fails to launch any of the individual trials. Even after waiting for hours, no trials are launched, though the logs show the system still checking for coverage data in its measurement loop.
This happens every time I try to run a local experiment.
I've checked `/tmp` in the dispatcher container during one of these failed runs, and there are no logs from any of the runner startup scripts, nor do the runner containers appear to ever start. However, if I then manually run the startup script in the dispatcher container (e.g. `docker exec -it dispatcher-container /bin/bash; /tmp/r-startup-script3.sh`) the trial / runner container starts up with no issues.
I've tried adding some logging to the python code that starts the runners but everything looked fine from that end and I was not able to figure out what the cause of the issue is.
I've been able to reproduce this issue on multiple machines running recent versions of Ubuntu. The only changes I made on top of master were to add `rsync` to the relevant Dockerfiles to work around https://github.com/google/fuzzbench/issues/1593
OS: Ubuntu (e.g. 22.04)
Docker version: e.g. 20.10.22
Commit: aa6ddd05
Reproduction:
1. add rsync to Dockerfiles;
2. make;
3. docker build the dispatcher image;
4. run an experiment with a basic config, e.g. [experiment-config.yaml.txt](https://github.com/google/fuzzbench/files/10322923/experiment-config.yaml.txt), using a command such as
[exp.sh.txt](https://github.com/google/fuzzbench/files/10322924/exp.sh.txt)
Any insight would be much appreciated! And I'd be happy to provide logs / additional information as needed.
Thanks!
</issue>
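One behaviour worth keeping in mind while reading the code below: `run_local_instance` in `common/gcloud.py` starts the runner startup script with `subprocess.Popen` used as a context manager, and `Popen.__exit__` closes the pipes and then waits for the child process to exit. Whether or not that is the root cause of the symptoms reported above, the difference between a blocking and a fire-and-forget launch is easy to demonstrate; the script path in this sketch is made up purely for illustration.

```python
# popen_launch_demo.py -- standalone illustration, not part of FuzzBench
import subprocess
import time

SCRIPT = "/tmp/example-startup-script.sh"  # hypothetical path


def launch_blocking(script_path):
    """Popen as a context manager: __exit__ calls wait(), so this function
    only returns after the script has finished running."""
    with subprocess.Popen(["/bin/bash", script_path],
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT) as proc:
        pass
    return proc.returncode


def launch_background(script_path):
    """Fire-and-forget: the child keeps running after this returns."""
    return subprocess.Popen(["/bin/bash", script_path],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)


if __name__ == "__main__":
    start = time.time()
    launch_background(SCRIPT)  # returns immediately, child runs on its own
    print("background launch returned after %.3f seconds" % (time.time() - start))
```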
<code>
[start of common/gcloud.py]
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Google cloud related code."""
15
16 import enum
17 import posixpath
18 import subprocess
19 from typing import List, Optional
20
21 from common import experiment_utils
22 from common import logs
23 from common import new_process
24
25 # Constants for dispatcher specs.
26 DISPATCHER_MACHINE_TYPE = 'n1-highmem-96'
27 DISPATCHER_BOOT_DISK_SIZE = '4TB'
28 DISPATCHER_BOOT_DISK_TYPE = 'pd-ssd'
29
30 # Constants for runner specs.
31 RUNNER_BOOT_DISK_SIZE = '200GB'
32
33 # Constants for measurer worker specs.
34 MEASURER_WORKER_MACHINE_TYPE = 'n1-standard-1'
35 MEASURER_WORKER_BOOT_DISK_SIZE = '50GB'
36
37 # Number of instances to process at once.
38 INSTANCE_BATCH_SIZE = 100
39
40
41 class InstanceType(enum.Enum):
42 """Types of instances we need for the experiment."""
43 DISPATCHER = 0
44 RUNNER = 1
45
46
47 def create_instance(instance_name: str,
48 instance_type: InstanceType,
49 config: dict,
50 startup_script: Optional[str] = None,
51 preemptible: bool = False,
52 **kwargs) -> bool:
53 """Creates a GCE instance with name, |instance_name|, type, |instance_type|
54 and with optionally provided and |startup_script|."""
55
56 if experiment_utils.is_local_experiment():
57 return run_local_instance(startup_script)
58
59 command = [
60 'gcloud',
61 'compute',
62 'instances',
63 'create',
64 instance_name,
65 '--image-family=cos-stable',
66 '--image-project=cos-cloud',
67 f'--zone={config["cloud_compute_zone"]}',
68 '--scopes=cloud-platform',
69 ]
70 if instance_type == InstanceType.DISPATCHER:
71 command.extend([
72 f'--machine-type={DISPATCHER_MACHINE_TYPE}',
73 f'--boot-disk-size={DISPATCHER_BOOT_DISK_SIZE}',
74 f'--boot-disk-type={DISPATCHER_BOOT_DISK_TYPE}',
75 ])
76 else:
77 machine_type = config['runner_machine_type']
78 if machine_type is not None:
79 command.append(f'--machine-type={machine_type}')
80 else:
81 # Do this to support KLEE experiments.
82 command.append([
83 f'--custom-memory={config["runner_memory"]}',
84 f'--custom-cpu={config["runner_num_cpu_cores"]}',
85 ])
86
87 command.extend([
88 '--no-address',
89 f'--boot-disk-size={RUNNER_BOOT_DISK_SIZE}',
90 ])
91
92 if preemptible:
93 command.append('--preemptible')
94 if startup_script:
95 command.extend(
96 ['--metadata-from-file', f'startup-script={startup_script}'])
97
98 result = new_process.execute(command, expect_zero=False, **kwargs)
99 if result.retcode == 0:
100 return True
101
102 logs.info('Failed to create instance. Command: %s failed. Output: %s',
103 command, result.output)
104 return False
105
106
107 def delete_instances(instance_names: List[str], zone: str, **kwargs) -> bool:
108 """Delete gcloud instance |instance_names|. Returns true if the operation
109 succeeded or false otherwise."""
110 error_occurred = False
111 # Delete instances in batches, otherwise we run into rate limit errors.
112 for idx in range(0, len(instance_names), INSTANCE_BATCH_SIZE):
113 # -q is needed otherwise gcloud will prompt "Y/N?".
114 command = ['gcloud', 'compute', 'instances', 'delete', '-q']
115 command.extend(instance_names[idx:idx + INSTANCE_BATCH_SIZE])
116 command.extend(['--zone', zone])
117 result = new_process.execute(command, expect_zero=False, **kwargs)
118 error_occurred = error_occurred or result.retcode != 0
119
120 return not error_occurred
121
122
123 def set_default_project(cloud_project: str):
124 """Set default project for future gcloud and gsutil commands."""
125 return new_process.execute(
126 ['gcloud', 'config', 'set', 'project', cloud_project])
127
128
129 def run_local_instance(startup_script: Optional[str] = None) -> bool:
130 """Does the equivalent of "create_instance" for local experiments, runs
131 |startup_script| in the background."""
132 command = ['/bin/bash', startup_script]
133 with subprocess.Popen(command,
134 stdout=subprocess.PIPE,
135 stderr=subprocess.STDOUT):
136 return new_process.ProcessResult(0, '', False)
137
138
139 def create_instance_template(template_name, docker_image, env, project, zone):
140 """Returns a ProcessResult from running the command to create an instance
141 template."""
142 # Creating an instance template cannot be done using the GCE API because
143 # there is no public API for handling some docker related functionality that
144 # we need.
145 command = [
146 'gcloud', 'compute', '--project', project, 'instance-templates',
147 'create-with-container', template_name, '--no-address',
148 '--image-family=cos-stable', '--image-project=cos-cloud',
149 f'--region={zone}', '--scopes=cloud-platform',
150 f'--machine-type={MEASURER_WORKER_MACHINE_TYPE}',
151 f'--boot-disk-size={MEASURER_WORKER_BOOT_DISK_SIZE}', '--preemptible',
152 '--container-image', docker_image
153 ]
154 for item in env.items():
155 command.extend(['--container-env', f'{item[0]}={item[1]}'])
156 new_process.execute(command)
157 return posixpath.join('https://www.googleapis.com/compute/v1/projects/',
158 project, 'global', 'instanceTemplates', template_name)
159
160
161 def delete_instance_template(template_name: str):
162 """Returns a ProcessResult from running the command to delete the
163 measure_worker template for this |experiment|."""
164 command = [
165 'gcloud', 'compute', 'instance-templates', 'delete', template_name
166 ]
167 return new_process.execute(command)
168
169
170 def get_account():
171 """Returns the email address of the current account being used."""
172 return new_process.execute(['gcloud', 'config', 'get-value',
173 'account']).output.strip()
174
[end of common/gcloud.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/common/gcloud.py b/common/gcloud.py
--- a/common/gcloud.py
+++ b/common/gcloud.py
@@ -130,10 +130,9 @@
"""Does the equivalent of "create_instance" for local experiments, runs
|startup_script| in the background."""
command = ['/bin/bash', startup_script]
- with subprocess.Popen(command,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT):
- return new_process.ProcessResult(0, '', False)
+ # pylint: disable=consider-using-with
+ subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ return True
def create_instance_template(template_name, docker_image, env, project, zone):
|
{"golden_diff": "diff --git a/common/gcloud.py b/common/gcloud.py\n--- a/common/gcloud.py\n+++ b/common/gcloud.py\n@@ -130,10 +130,9 @@\n \"\"\"Does the equivalent of \"create_instance\" for local experiments, runs\n |startup_script| in the background.\"\"\"\n command = ['/bin/bash', startup_script]\n- with subprocess.Popen(command,\n- stdout=subprocess.PIPE,\n- stderr=subprocess.STDOUT):\n- return new_process.ProcessResult(0, '', False)\n+ # pylint: disable=consider-using-with\n+ subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n+ return True\n \n \n def create_instance_template(template_name, docker_image, env, project, zone):\n", "issue": "trials not launching in local experiment\nI'm running into an issue running local experiments: the dispatcher fails to launch any of the individual trials. Even waiting for hours, no trials are launched, though the system appears to still be checking for coverage data in the measurement loop from the logs.\r\n\r\nThis happens every time I try to run a local experiment.\r\n\r\nI've checked `/tmp` in the dispatcher container during one of these failed runs, and there are no logs from any of the runner startup scripts, nor do the runner containers appear to ever start. However, if I then manually run the startup script in the dispatcher container (e.g. `docker exec -it dispatcher-container /bin/bash; /tmp/r-startup-script3.sh`) the trial / runner container starts up with no issues.\r\n\r\nI've tried adding some logging to the python code that starts the runners but everything looked fine from that end and I was not able to figure out what the cause of the issue is.\r\n\r\nI've been able to reproduce this issue on multiple machines running recent versions of Ubuntu. The only changes I made to master were to add `rsync` to the relevant dockerfiles to work around: https://github.com/google/fuzzbench/issues/1593\r\n\r\nOS: Ubuntu (e.g. 22.04)\r\nDocker version: e.g. 20.10.22\r\nCommit: aa6ddd05\r\nReproduction: \r\n1. add rsync to Dockerfiles; \r\n2. make; \r\n3. docker build the dispatcher image; \r\n4. run an experiment with basic config e.g. [experiment-config.yaml.txt](https://github.com/google/fuzzbench/files/10322923/experiment-config.yaml.txt) using command such as\r\n[exp.sh.txt](https://github.com/google/fuzzbench/files/10322924/exp.sh.txt)\r\n\r\nAny insight would be much appreciated! 
And I'd be happy to provide logs / additional information as needed.\r\n\r\nThanks!\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Google cloud related code.\"\"\"\n\nimport enum\nimport posixpath\nimport subprocess\nfrom typing import List, Optional\n\nfrom common import experiment_utils\nfrom common import logs\nfrom common import new_process\n\n# Constants for dispatcher specs.\nDISPATCHER_MACHINE_TYPE = 'n1-highmem-96'\nDISPATCHER_BOOT_DISK_SIZE = '4TB'\nDISPATCHER_BOOT_DISK_TYPE = 'pd-ssd'\n\n# Constants for runner specs.\nRUNNER_BOOT_DISK_SIZE = '200GB'\n\n# Constants for measurer worker specs.\nMEASURER_WORKER_MACHINE_TYPE = 'n1-standard-1'\nMEASURER_WORKER_BOOT_DISK_SIZE = '50GB'\n\n# Number of instances to process at once.\nINSTANCE_BATCH_SIZE = 100\n\n\nclass InstanceType(enum.Enum):\n \"\"\"Types of instances we need for the experiment.\"\"\"\n DISPATCHER = 0\n RUNNER = 1\n\n\ndef create_instance(instance_name: str,\n instance_type: InstanceType,\n config: dict,\n startup_script: Optional[str] = None,\n preemptible: bool = False,\n **kwargs) -> bool:\n \"\"\"Creates a GCE instance with name, |instance_name|, type, |instance_type|\n and with optionally provided and |startup_script|.\"\"\"\n\n if experiment_utils.is_local_experiment():\n return run_local_instance(startup_script)\n\n command = [\n 'gcloud',\n 'compute',\n 'instances',\n 'create',\n instance_name,\n '--image-family=cos-stable',\n '--image-project=cos-cloud',\n f'--zone={config[\"cloud_compute_zone\"]}',\n '--scopes=cloud-platform',\n ]\n if instance_type == InstanceType.DISPATCHER:\n command.extend([\n f'--machine-type={DISPATCHER_MACHINE_TYPE}',\n f'--boot-disk-size={DISPATCHER_BOOT_DISK_SIZE}',\n f'--boot-disk-type={DISPATCHER_BOOT_DISK_TYPE}',\n ])\n else:\n machine_type = config['runner_machine_type']\n if machine_type is not None:\n command.append(f'--machine-type={machine_type}')\n else:\n # Do this to support KLEE experiments.\n command.append([\n f'--custom-memory={config[\"runner_memory\"]}',\n f'--custom-cpu={config[\"runner_num_cpu_cores\"]}',\n ])\n\n command.extend([\n '--no-address',\n f'--boot-disk-size={RUNNER_BOOT_DISK_SIZE}',\n ])\n\n if preemptible:\n command.append('--preemptible')\n if startup_script:\n command.extend(\n ['--metadata-from-file', f'startup-script={startup_script}'])\n\n result = new_process.execute(command, expect_zero=False, **kwargs)\n if result.retcode == 0:\n return True\n\n logs.info('Failed to create instance. Command: %s failed. Output: %s',\n command, result.output)\n return False\n\n\ndef delete_instances(instance_names: List[str], zone: str, **kwargs) -> bool:\n \"\"\"Delete gcloud instance |instance_names|. 
Returns true if the operation\n succeeded or false otherwise.\"\"\"\n error_occurred = False\n # Delete instances in batches, otherwise we run into rate limit errors.\n for idx in range(0, len(instance_names), INSTANCE_BATCH_SIZE):\n # -q is needed otherwise gcloud will prompt \"Y/N?\".\n command = ['gcloud', 'compute', 'instances', 'delete', '-q']\n command.extend(instance_names[idx:idx + INSTANCE_BATCH_SIZE])\n command.extend(['--zone', zone])\n result = new_process.execute(command, expect_zero=False, **kwargs)\n error_occurred = error_occurred or result.retcode != 0\n\n return not error_occurred\n\n\ndef set_default_project(cloud_project: str):\n \"\"\"Set default project for future gcloud and gsutil commands.\"\"\"\n return new_process.execute(\n ['gcloud', 'config', 'set', 'project', cloud_project])\n\n\ndef run_local_instance(startup_script: Optional[str] = None) -> bool:\n \"\"\"Does the equivalent of \"create_instance\" for local experiments, runs\n |startup_script| in the background.\"\"\"\n command = ['/bin/bash', startup_script]\n with subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT):\n return new_process.ProcessResult(0, '', False)\n\n\ndef create_instance_template(template_name, docker_image, env, project, zone):\n \"\"\"Returns a ProcessResult from running the command to create an instance\n template.\"\"\"\n # Creating an instance template cannot be done using the GCE API because\n # there is no public API for handling some docker related functionality that\n # we need.\n command = [\n 'gcloud', 'compute', '--project', project, 'instance-templates',\n 'create-with-container', template_name, '--no-address',\n '--image-family=cos-stable', '--image-project=cos-cloud',\n f'--region={zone}', '--scopes=cloud-platform',\n f'--machine-type={MEASURER_WORKER_MACHINE_TYPE}',\n f'--boot-disk-size={MEASURER_WORKER_BOOT_DISK_SIZE}', '--preemptible',\n '--container-image', docker_image\n ]\n for item in env.items():\n command.extend(['--container-env', f'{item[0]}={item[1]}'])\n new_process.execute(command)\n return posixpath.join('https://www.googleapis.com/compute/v1/projects/',\n project, 'global', 'instanceTemplates', template_name)\n\n\ndef delete_instance_template(template_name: str):\n \"\"\"Returns a ProcessResult from running the command to delete the\n measure_worker template for this |experiment|.\"\"\"\n command = [\n 'gcloud', 'compute', 'instance-templates', 'delete', template_name\n ]\n return new_process.execute(command)\n\n\ndef get_account():\n \"\"\"Returns the email address of the current account being used.\"\"\"\n return new_process.execute(['gcloud', 'config', 'get-value',\n 'account']).output.strip()\n", "path": "common/gcloud.py"}]}
| 2,789 | 161 |
gh_patches_debug_17461
|
rasdani/github-patches
|
git_diff
|
DDMAL__CantusDB-1472
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"My sources" page and sidebar should be ordered by date updated
How are sources supposed to show up in "My sources"? I am an editor for some sources, but they don't seem to be in "My sources", so what determines this?
</issue>
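For orientation, the two questions here reduce to a single queryset: membership in "My sources" comes from a filter on editors and creators, and the ordering the title asks for is an `order_by` on an update timestamp. The sketch below reuses the field names that appear in the code further down; treat it as an illustration rather than the project's actual implementation, and note that it assumes the `Source` model exposes a `date_updated` field.

```python
# Hypothetical sketch; names mirror main_app.models.Source as used in this report.
from django.core.paginator import Paginator
from django.db.models import Q

from main_app.models import Source


def my_sources_queryset(user):
    """Sources the user created or currently edits, most recently updated first."""
    return (
        Source.objects.filter(Q(current_editors=user) | Q(created_by=user))
        .order_by("-date_updated")  # newest edits float to the top
        .distinct()
    )


def my_sources_page(user, page_number, per_page=10):
    """Paginate the queryset for display (the views below use 10 and 6 per page)."""
    paginator = Paginator(my_sources_queryset(user), per_page)
    return paginator.get_page(page_number)
```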
<code>
[start of django/cantusdb_project/main_app/templatetags/helper_tags.py]
1 import calendar
2 from typing import Union, Optional
3 from django import template
4 from main_app.models import Source
5 from articles.models import Article
6 from django.utils.safestring import mark_safe
7 from django.urls import reverse
8 from django.core.paginator import Paginator
9 from django.db.models import Q
10
11
12 register = template.Library()
13
14
15 @register.simple_tag(takes_context=False)
16 def recent_articles():
17 """
18 Generates a html unordered list of recent articles for display on the homepage
19
20 Used in:
21 templates/flatpages/default.html
22 """
23 articles = Article.objects.order_by("-date_created")[:5]
24 list_item_template = '<li style="padding-bottom: 0.5em;"><a href="{url}">{title}</a><br><small>{date}</small></li>'
25 list_items = [
26 list_item_template.format(
27 url=a.get_absolute_url(),
28 title=a.title,
29 date=a.date_created.strftime("%A %B %-d, %Y"),
30 )
31 for a in articles
32 ]
33 list_items_string = "".join(list_items)
34 recent_articles_string = "<ul>{lis}</ul>".format(lis=list_items_string)
35 return mark_safe(recent_articles_string)
36
37
38 @register.filter(name="month_to_string")
39 def month_to_string(value: Optional[Union[str, int]]) -> Optional[Union[str, int]]:
40 """
41 Converts month number to textual representation, 3 letters (Jan, Mar, etc)
42
43 used in:
44 main_app/templates/feast_detail.html
45 main_app/templates/feast_list.html
46 """
47 if type(value) == int and value in range(1, 13):
48 return calendar.month_abbr[value]
49 else:
50 return value
51
52
53 @register.simple_tag(takes_context=True)
54 def url_add_get_params(context, **kwargs):
55 """
56 accounts for the situations where there may be two paginations in one page
57
58 Used in:
59 main_app/templates/pagination.html
60 main_app/templates/user_source_list.html
61 """
62 query = context["request"].GET.copy()
63 if "page" in kwargs:
64 query.pop("page", None)
65 if "page2" in kwargs:
66 query.pop("page2", None)
67 query.update(kwargs)
68 return query.urlencode()
69
70
71 @register.simple_tag(takes_context=False)
72 def source_links():
73 """
74 Generates a series of html option tags linking to sources in Cantus Dabase, for display on the homepage
75
76 Used in:
77 templates/flatpages/default.html
78 """
79 sources = (
80 Source.objects.filter(published=True, segment__id=4063)
81 .exclude(siglum=None)
82 .values("siglum", "id")
83 .order_by("siglum")
84 )
85 options = ""
86 for source in sources:
87 option_str = (
88 f"<option value=source/{source['id']}>{source['siglum']}</option>\n"
89 )
90 options += option_str
91
92 return mark_safe(options)
93
94
95 @register.filter
96 def classname(obj):
97 """
98 Returns the name of the object's class
99 A use-case is: {% if object|classname == "Notation" %}
100
101 Used in:
102 main_app/templates/content_overview.html
103 """
104 return obj.__class__.__name__
105
106
107 @register.filter
108 def admin_url_name(class_name, action):
109 """
110 Accepts the name of a class in "main_app", and an action (either "change" or "delete") as arguments.
111 Returns the name of the URL for changing/deleting an object in the admin interface.
112
113 Used in:
114 main_app/templates/content_overview.html
115 """
116 class_name = class_name.lower()
117 action = action.lower()
118
119 return f"admin:main_app_{class_name}_{action}"
120
121
122 @register.filter(name="has_group")
123 def has_group(user, group_name):
124 """
125 Used in:
126 templates/base.html
127 """
128 return user.groups.filter(name=group_name).exists()
129
130
131 @register.simple_tag(takes_context=True)
132 def get_user_source_pagination(context):
133 user_created_sources = (
134 Source.objects.filter(
135 Q(current_editors=context["user"]) | Q(created_by=context["user"])
136 )
137 .order_by("-date_created")
138 .distinct()
139 )
140 paginator = Paginator(user_created_sources, 6)
141 page_number = context["request"].GET.get("page")
142 user_sources_page_obj = paginator.get_page(page_number)
143 return user_sources_page_obj
144
145
146 @register.simple_tag(takes_context=True)
147 def get_user_created_source_pagination(context):
148 user_created_sources = (
149 Source.objects.filter(created_by=context["user"])
150 .order_by("-date_created")
151 .distinct()
152 )
153 paginator = Paginator(user_created_sources, 6)
154 page_number = context["request"].GET.get("page2")
155 user_created_sources_page_obj = paginator.get_page(page_number)
156 return user_created_sources_page_obj
157
[end of django/cantusdb_project/main_app/templatetags/helper_tags.py]
[start of django/cantusdb_project/main_app/views/user.py]
1 from django.urls import reverse
2 from django.db.models.aggregates import Count
3 from django.views.generic import DetailView
4 from django.contrib.auth import get_user_model, login as auth_login
5 from main_app.models import Source
6 from django.views.generic import ListView
7 from django.contrib.auth.mixins import LoginRequiredMixin
8 from django.db.models import Q
9 from django.core.paginator import Paginator
10 from django.contrib.auth.views import LogoutView, LoginView
11 from django.contrib import messages
12 from extra_views import SearchableListMixin
13 from django.http import HttpResponseRedirect
14 from django.core.exceptions import PermissionDenied
15 from main_app.permissions import user_can_view_user_detail
16
17
18 class UserDetailView(DetailView):
19 """Detail view for User model
20
21 Accessed by /users/<pk>
22 """
23
24 model = get_user_model()
25 context_object_name = "user"
26 template_name = "user_detail.html"
27
28 def get_context_data(self, **kwargs):
29 user = self.get_object()
30 # to begin, if the person viewing the site is not logged in,
31 # they should only be able to view the detail pages of indexers,
32 # and not the detail pages of run-of-the-mill users
33 viewing_user = self.request.user
34 if not user_can_view_user_detail(viewing_user, user):
35 raise PermissionDenied()
36
37 context = super().get_context_data(**kwargs)
38 display_unpublished = viewing_user.is_authenticated
39 sort_by_siglum = lambda source: source.siglum
40 if display_unpublished:
41 context["inventoried_sources"] = sorted(
42 user.inventoried_sources.all(), key=sort_by_siglum
43 )
44 context["full_text_sources"] = sorted(
45 user.entered_full_text_for_sources.all(), key=sort_by_siglum
46 )
47 context["melody_sources"] = sorted(
48 user.entered_melody_for_sources.all(), key=sort_by_siglum
49 )
50 context["proofread_sources"] = sorted(
51 user.proofread_sources.all(), key=sort_by_siglum
52 )
53 context["edited_sources"] = sorted(
54 user.edited_sources.all(), key=sort_by_siglum
55 )
56 else:
57 context["inventoried_sources"] = sorted(
58 user.inventoried_sources.all().filter(published=True),
59 key=sort_by_siglum,
60 )
61 context["full_text_sources"] = sorted(
62 user.entered_full_text_for_sources.all().filter(published=True),
63 key=sort_by_siglum,
64 )
65 context["melody_sources"] = sorted(
66 user.entered_melody_for_sources.all().filter(published=True),
67 key=sort_by_siglum,
68 )
69 context["proofread_sources"] = sorted(
70 user.proofread_sources.all().filter(published=True), key=sort_by_siglum
71 )
72 context["edited_sources"] = sorted(
73 user.edited_sources.all().filter(published=True), key=sort_by_siglum
74 )
75
76 return context
77
78
79 class UserSourceListView(LoginRequiredMixin, ListView):
80 model = Source
81 context_object_name = "sources"
82 template_name = "user_source_list.html"
83
84 def get_context_data(self, **kwargs):
85 context = super().get_context_data(**kwargs)
86
87 my_sources = (
88 Source.objects.filter(
89 Q(current_editors=self.request.user)
90 | Q(created_by=self.request.user)
91 # | Q(inventoried_by=self.request.user)
92 # | Q(full_text_entered_by=self.request.user)
93 # | Q(melodies_entered_by=self.request.user)
94 # | Q(proofreaders=self.request.user)
95 # | Q(other_editors=self.request.user)
96 )
97 .order_by("-date_created")
98 .distinct()
99 )
100
101 user_sources_paginator = Paginator(my_sources, 10)
102 user_sources_page_num = self.request.GET.get("page")
103 user_sources_page_obj = user_sources_paginator.get_page(user_sources_page_num)
104
105 user_created_sources = (
106 Source.objects.filter(created_by=self.request.user)
107 .order_by("-date_created")
108 .distinct()
109 )
110 user_created_paginator = Paginator(user_created_sources, 6)
111 user_created_page_num = self.request.GET.get("page2")
112 user_created_page_obj = user_created_paginator.get_page(user_created_page_num)
113
114 context["page_obj"] = user_sources_page_obj
115 context["user_created_sources_page_obj"] = user_created_page_obj
116 return context
117
118
119 class CustomLogoutView(LogoutView):
120 def get_next_page(self):
121 next_page = super().get_next_page()
122 messages.success(self.request, "You have successfully logged out!")
123 return next_page
124
125
126 class UserListView(LoginRequiredMixin, SearchableListMixin, ListView):
127 """A list of all User objects
128
129 This view is equivalent to the user list view on the old Cantus.
130 This includes all User objects on the old Cantus.
131 When passed a `?q=<query>` argument in the GET request, it will filter users
132 based on the fields defined in `search_fields` with the `icontains` lookup.
133
134 Accessed by /users/
135 """
136
137 model = get_user_model()
138 ordering = "full_name"
139 search_fields = ["full_name", "institution", "city", "country"]
140 paginate_by = 100
141 template_name = "user_list.html"
142 context_object_name = "users"
143
144
145 class IndexerListView(SearchableListMixin, ListView):
146 """A list of User objects shown to the public
147
148 This view replaces the indexer list view on the old Cantus.
149 The indexers are considered a subset of all User objects, the subset shown to the public.
150 This includes the User objects corresponding to Indexer objects on the old Cantus.
151 When passed a `?q=<query>` argument in the GET request, it will filter users
152 based on the fields defined in `search_fields` with the `icontains` lookup.
153
154 Accessed by /indexers/
155 """
156
157 model = get_user_model()
158 ordering = "full_name"
159 search_fields = ["full_name", "institution", "city", "country"]
160 paginate_by = 100
161 template_name = "indexer_list.html"
162 context_object_name = "indexers"
163
164 def get_queryset(self):
165 all_users = super().get_queryset()
166 indexers = all_users.filter(is_indexer=True)
167 display_unpublished = self.request.user.is_authenticated
168 if display_unpublished:
169 indexers = indexers.annotate(source_count=Count("inventoried_sources"))
170 # display those who have at least one source
171 return indexers.filter(source_count__gte=1)
172 else:
173 indexers = indexers.annotate(
174 source_count=Count(
175 "inventoried_sources", filter=Q(inventoried_sources__published=True)
176 )
177 )
178 # display those who have at least one published source
179 return indexers.filter(source_count__gte=1)
180
[end of django/cantusdb_project/main_app/views/user.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/django/cantusdb_project/main_app/templatetags/helper_tags.py b/django/cantusdb_project/main_app/templatetags/helper_tags.py
--- a/django/cantusdb_project/main_app/templatetags/helper_tags.py
+++ b/django/cantusdb_project/main_app/templatetags/helper_tags.py
@@ -134,7 +134,7 @@
Source.objects.filter(
Q(current_editors=context["user"]) | Q(created_by=context["user"])
)
- .order_by("-date_created")
+ .order_by("-date_updated")
.distinct()
)
paginator = Paginator(user_created_sources, 6)
diff --git a/django/cantusdb_project/main_app/views/user.py b/django/cantusdb_project/main_app/views/user.py
--- a/django/cantusdb_project/main_app/views/user.py
+++ b/django/cantusdb_project/main_app/views/user.py
@@ -104,7 +104,7 @@
user_created_sources = (
Source.objects.filter(created_by=self.request.user)
- .order_by("-date_created")
+ .order_by("-date_updated")
.distinct()
)
user_created_paginator = Paginator(user_created_sources, 6)
|
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/templatetags/helper_tags.py b/django/cantusdb_project/main_app/templatetags/helper_tags.py\n--- a/django/cantusdb_project/main_app/templatetags/helper_tags.py\n+++ b/django/cantusdb_project/main_app/templatetags/helper_tags.py\n@@ -134,7 +134,7 @@\n Source.objects.filter(\n Q(current_editors=context[\"user\"]) | Q(created_by=context[\"user\"])\n )\n- .order_by(\"-date_created\")\n+ .order_by(\"-date_updated\")\n .distinct()\n )\n paginator = Paginator(user_created_sources, 6)\ndiff --git a/django/cantusdb_project/main_app/views/user.py b/django/cantusdb_project/main_app/views/user.py\n--- a/django/cantusdb_project/main_app/views/user.py\n+++ b/django/cantusdb_project/main_app/views/user.py\n@@ -104,7 +104,7 @@\n \n user_created_sources = (\n Source.objects.filter(created_by=self.request.user)\n- .order_by(\"-date_created\")\n+ .order_by(\"-date_updated\")\n .distinct()\n )\n user_created_paginator = Paginator(user_created_sources, 6)\n", "issue": "\"My sources\" page and sidebar should be ordered by date updated\nHow are sources supposed to show up in \"My sources\"? I am an editor for some sources but they don't seem to be in \"my sources\", so what determines this?\r\n\n", "before_files": [{"content": "import calendar\nfrom typing import Union, Optional\nfrom django import template\nfrom main_app.models import Source\nfrom articles.models import Article\nfrom django.utils.safestring import mark_safe\nfrom django.urls import reverse\nfrom django.core.paginator import Paginator\nfrom django.db.models import Q\n\n\nregister = template.Library()\n\n\[email protected]_tag(takes_context=False)\ndef recent_articles():\n \"\"\"\n Generates a html unordered list of recent articles for display on the homepage\n\n Used in:\n templates/flatpages/default.html\n \"\"\"\n articles = Article.objects.order_by(\"-date_created\")[:5]\n list_item_template = '<li style=\"padding-bottom: 0.5em;\"><a href=\"{url}\">{title}</a><br><small>{date}</small></li>'\n list_items = [\n list_item_template.format(\n url=a.get_absolute_url(),\n title=a.title,\n date=a.date_created.strftime(\"%A %B %-d, %Y\"),\n )\n for a in articles\n ]\n list_items_string = \"\".join(list_items)\n recent_articles_string = \"<ul>{lis}</ul>\".format(lis=list_items_string)\n return mark_safe(recent_articles_string)\n\n\[email protected](name=\"month_to_string\")\ndef month_to_string(value: Optional[Union[str, int]]) -> Optional[Union[str, int]]:\n \"\"\"\n Converts month number to textual representation, 3 letters (Jan, Mar, etc)\n\n used in:\n main_app/templates/feast_detail.html\n main_app/templates/feast_list.html\n \"\"\"\n if type(value) == int and value in range(1, 13):\n return calendar.month_abbr[value]\n else:\n return value\n\n\[email protected]_tag(takes_context=True)\ndef url_add_get_params(context, **kwargs):\n \"\"\"\n accounts for the situations where there may be two paginations in one page\n\n Used in:\n main_app/templates/pagination.html\n main_app/templates/user_source_list.html\n \"\"\"\n query = context[\"request\"].GET.copy()\n if \"page\" in kwargs:\n query.pop(\"page\", None)\n if \"page2\" in kwargs:\n query.pop(\"page2\", None)\n query.update(kwargs)\n return query.urlencode()\n\n\[email protected]_tag(takes_context=False)\ndef source_links():\n \"\"\"\n Generates a series of html option tags linking to sources in Cantus Dabase, for display on the homepage\n\n Used in:\n templates/flatpages/default.html\n \"\"\"\n sources = (\n 
Source.objects.filter(published=True, segment__id=4063)\n .exclude(siglum=None)\n .values(\"siglum\", \"id\")\n .order_by(\"siglum\")\n )\n options = \"\"\n for source in sources:\n option_str = (\n f\"<option value=source/{source['id']}>{source['siglum']}</option>\\n\"\n )\n options += option_str\n\n return mark_safe(options)\n\n\[email protected]\ndef classname(obj):\n \"\"\"\n Returns the name of the object's class\n A use-case is: {% if object|classname == \"Notation\" %}\n\n Used in:\n main_app/templates/content_overview.html\n \"\"\"\n return obj.__class__.__name__\n\n\[email protected]\ndef admin_url_name(class_name, action):\n \"\"\"\n Accepts the name of a class in \"main_app\", and an action (either \"change\" or \"delete\") as arguments.\n Returns the name of the URL for changing/deleting an object in the admin interface.\n\n Used in:\n main_app/templates/content_overview.html\n \"\"\"\n class_name = class_name.lower()\n action = action.lower()\n\n return f\"admin:main_app_{class_name}_{action}\"\n\n\[email protected](name=\"has_group\")\ndef has_group(user, group_name):\n \"\"\"\n Used in:\n templates/base.html\n \"\"\"\n return user.groups.filter(name=group_name).exists()\n\n\[email protected]_tag(takes_context=True)\ndef get_user_source_pagination(context):\n user_created_sources = (\n Source.objects.filter(\n Q(current_editors=context[\"user\"]) | Q(created_by=context[\"user\"])\n )\n .order_by(\"-date_created\")\n .distinct()\n )\n paginator = Paginator(user_created_sources, 6)\n page_number = context[\"request\"].GET.get(\"page\")\n user_sources_page_obj = paginator.get_page(page_number)\n return user_sources_page_obj\n\n\[email protected]_tag(takes_context=True)\ndef get_user_created_source_pagination(context):\n user_created_sources = (\n Source.objects.filter(created_by=context[\"user\"])\n .order_by(\"-date_created\")\n .distinct()\n )\n paginator = Paginator(user_created_sources, 6)\n page_number = context[\"request\"].GET.get(\"page2\")\n user_created_sources_page_obj = paginator.get_page(page_number)\n return user_created_sources_page_obj\n", "path": "django/cantusdb_project/main_app/templatetags/helper_tags.py"}, {"content": "from django.urls import reverse\nfrom django.db.models.aggregates import Count\nfrom django.views.generic import DetailView\nfrom django.contrib.auth import get_user_model, login as auth_login\nfrom main_app.models import Source\nfrom django.views.generic import ListView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db.models import Q\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth.views import LogoutView, LoginView\nfrom django.contrib import messages\nfrom extra_views import SearchableListMixin\nfrom django.http import HttpResponseRedirect\nfrom django.core.exceptions import PermissionDenied\nfrom main_app.permissions import user_can_view_user_detail\n\n\nclass UserDetailView(DetailView):\n \"\"\"Detail view for User model\n\n Accessed by /users/<pk>\n \"\"\"\n\n model = get_user_model()\n context_object_name = \"user\"\n template_name = \"user_detail.html\"\n\n def get_context_data(self, **kwargs):\n user = self.get_object()\n # to begin, if the person viewing the site is not logged in,\n # they should only be able to view the detail pages of indexers,\n # and not the detail pages of run-of-the-mill users\n viewing_user = self.request.user\n if not user_can_view_user_detail(viewing_user, user):\n raise PermissionDenied()\n\n context = super().get_context_data(**kwargs)\n display_unpublished = 
viewing_user.is_authenticated\n sort_by_siglum = lambda source: source.siglum\n if display_unpublished:\n context[\"inventoried_sources\"] = sorted(\n user.inventoried_sources.all(), key=sort_by_siglum\n )\n context[\"full_text_sources\"] = sorted(\n user.entered_full_text_for_sources.all(), key=sort_by_siglum\n )\n context[\"melody_sources\"] = sorted(\n user.entered_melody_for_sources.all(), key=sort_by_siglum\n )\n context[\"proofread_sources\"] = sorted(\n user.proofread_sources.all(), key=sort_by_siglum\n )\n context[\"edited_sources\"] = sorted(\n user.edited_sources.all(), key=sort_by_siglum\n )\n else:\n context[\"inventoried_sources\"] = sorted(\n user.inventoried_sources.all().filter(published=True),\n key=sort_by_siglum,\n )\n context[\"full_text_sources\"] = sorted(\n user.entered_full_text_for_sources.all().filter(published=True),\n key=sort_by_siglum,\n )\n context[\"melody_sources\"] = sorted(\n user.entered_melody_for_sources.all().filter(published=True),\n key=sort_by_siglum,\n )\n context[\"proofread_sources\"] = sorted(\n user.proofread_sources.all().filter(published=True), key=sort_by_siglum\n )\n context[\"edited_sources\"] = sorted(\n user.edited_sources.all().filter(published=True), key=sort_by_siglum\n )\n\n return context\n\n\nclass UserSourceListView(LoginRequiredMixin, ListView):\n model = Source\n context_object_name = \"sources\"\n template_name = \"user_source_list.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n my_sources = (\n Source.objects.filter(\n Q(current_editors=self.request.user)\n | Q(created_by=self.request.user)\n # | Q(inventoried_by=self.request.user)\n # | Q(full_text_entered_by=self.request.user)\n # | Q(melodies_entered_by=self.request.user)\n # | Q(proofreaders=self.request.user)\n # | Q(other_editors=self.request.user)\n )\n .order_by(\"-date_created\")\n .distinct()\n )\n\n user_sources_paginator = Paginator(my_sources, 10)\n user_sources_page_num = self.request.GET.get(\"page\")\n user_sources_page_obj = user_sources_paginator.get_page(user_sources_page_num)\n\n user_created_sources = (\n Source.objects.filter(created_by=self.request.user)\n .order_by(\"-date_created\")\n .distinct()\n )\n user_created_paginator = Paginator(user_created_sources, 6)\n user_created_page_num = self.request.GET.get(\"page2\")\n user_created_page_obj = user_created_paginator.get_page(user_created_page_num)\n\n context[\"page_obj\"] = user_sources_page_obj\n context[\"user_created_sources_page_obj\"] = user_created_page_obj\n return context\n\n\nclass CustomLogoutView(LogoutView):\n def get_next_page(self):\n next_page = super().get_next_page()\n messages.success(self.request, \"You have successfully logged out!\")\n return next_page\n\n\nclass UserListView(LoginRequiredMixin, SearchableListMixin, ListView):\n \"\"\"A list of all User objects\n\n This view is equivalent to the user list view on the old Cantus.\n This includes all User objects on the old Cantus.\n When passed a `?q=<query>` argument in the GET request, it will filter users\n based on the fields defined in `search_fields` with the `icontains` lookup.\n\n Accessed by /users/\n \"\"\"\n\n model = get_user_model()\n ordering = \"full_name\"\n search_fields = [\"full_name\", \"institution\", \"city\", \"country\"]\n paginate_by = 100\n template_name = \"user_list.html\"\n context_object_name = \"users\"\n\n\nclass IndexerListView(SearchableListMixin, ListView):\n \"\"\"A list of User objects shown to the public\n\n This view replaces the indexer 
list view on the old Cantus.\n The indexers are considered a subset of all User objects, the subset shown to the public.\n This includes the User objects corresponding to Indexer objects on the old Cantus.\n When passed a `?q=<query>` argument in the GET request, it will filter users\n based on the fields defined in `search_fields` with the `icontains` lookup.\n\n Accessed by /indexers/\n \"\"\"\n\n model = get_user_model()\n ordering = \"full_name\"\n search_fields = [\"full_name\", \"institution\", \"city\", \"country\"]\n paginate_by = 100\n template_name = \"indexer_list.html\"\n context_object_name = \"indexers\"\n\n def get_queryset(self):\n all_users = super().get_queryset()\n indexers = all_users.filter(is_indexer=True)\n display_unpublished = self.request.user.is_authenticated\n if display_unpublished:\n indexers = indexers.annotate(source_count=Count(\"inventoried_sources\"))\n # display those who have at least one source\n return indexers.filter(source_count__gte=1)\n else:\n indexers = indexers.annotate(\n source_count=Count(\n \"inventoried_sources\", filter=Q(inventoried_sources__published=True)\n )\n )\n # display those who have at least one published source\n return indexers.filter(source_count__gte=1)\n", "path": "django/cantusdb_project/main_app/views/user.py"}]}
| 3,989 | 282 |
gh_patches_debug_21077
|
rasdani/github-patches
|
git_diff
|
frappe__frappe-25857
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add jitter on scheduled jobs
Hourly and daily long processes, if all started at once, can cause a sudden increase in workload when you have many sites/benches.
Adding simple jitter to the scheduled time can lessen the impact of such issues. Jitter is a common pattern for cases where "frequency" itself becomes the problem. E.g. gunicorn adds jitter to avoid restarting all workers at the same time, profilers add jitter to avoid amplifying some pattern of repeated work, and retry/backoff implementations also use it to avoid creating patterns.
Possible implementation: when importing scheduled job types, add some random delay to the cron schedule. E.g. daily jobs would start in the range of 12:00-12:15 AM instead of all starting at 12:00 AM.
Cons: some jobs are required to be executed at specific times, e.g. birthday reminders. So adding a negative offset can introduce bugs for them; a positive offset, however, should be fine AFAIK.
</issue>
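For illustration only, here is a minimal stand-alone sketch of the positive-only jitter idea described in the issue above; the helper name and the 600-second cap are assumptions for this example and are not part of frappe's API (the file under discussion, scheduled_job_type.py, is listed below).
```python
from datetime import datetime, timedelta
from random import randint

from croniter import croniter


def next_execution_with_jitter(
    cron_format: str, last_execution: datetime, max_jitter_seconds: int = 600
) -> datetime:
    """Return the next cron-scheduled run, pushed forward by a random positive offset.

    A positive-only offset means a job may start slightly late but never early,
    which keeps time-sensitive jobs such as birthday reminders safe.
    """
    next_execution = croniter(cron_format, last_execution).get_next(datetime)
    return next_execution + timedelta(seconds=randint(0, max_jitter_seconds))


# A daily job nominally due at midnight now lands somewhere in 00:00-00:10.
print(next_execution_with_jitter("0 0 * * *", datetime(2024, 1, 1, 15, 30)))
```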
<code>
[start of frappe/core/doctype/scheduled_job_type/scheduled_job_type.py]
1 # Copyright (c) 2021, Frappe Technologies and contributors
2 # License: MIT. See LICENSE
3
4 import json
5 from datetime import datetime
6
7 import click
8 from croniter import CroniterBadCronError, croniter
9
10 import frappe
11 from frappe import _
12 from frappe.model.document import Document
13 from frappe.utils import get_datetime, now_datetime
14 from frappe.utils.background_jobs import enqueue, is_job_enqueued
15
16
17 class ScheduledJobType(Document):
18 # begin: auto-generated types
19 # This code is auto-generated. Do not modify anything in this block.
20
21 from typing import TYPE_CHECKING
22
23 if TYPE_CHECKING:
24 from frappe.types import DF
25
26 create_log: DF.Check
27 cron_format: DF.Data | None
28 frequency: DF.Literal[
29 "All",
30 "Hourly",
31 "Hourly Long",
32 "Daily",
33 "Daily Long",
34 "Weekly",
35 "Weekly Long",
36 "Monthly",
37 "Monthly Long",
38 "Cron",
39 "Yearly",
40 "Annual",
41 ]
42 last_execution: DF.Datetime | None
43 method: DF.Data
44 next_execution: DF.Datetime | None
45 server_script: DF.Link | None
46 stopped: DF.Check
47
48 # end: auto-generated types
49 def autoname(self):
50 self.name = ".".join(self.method.split(".")[-2:])
51
52 def validate(self):
53 if self.frequency != "All":
54 # force logging for all events other than continuous ones (ALL)
55 self.create_log = 1
56
57 if self.frequency == "Cron":
58 if not self.cron_format:
59 frappe.throw(_("Cron format is required for job types with Cron frequency."))
60 try:
61 croniter(self.cron_format)
62 except CroniterBadCronError:
63 frappe.throw(
64 _("{0} is not a valid Cron expression.").format(f"<code>{self.cron_format}</code>"),
65 title=_("Bad Cron Expression"),
66 )
67
68 def enqueue(self, force=False) -> bool:
69 # enqueue event if last execution is done
70 if self.is_event_due() or force:
71 if not self.is_job_in_queue():
72 enqueue(
73 "frappe.core.doctype.scheduled_job_type.scheduled_job_type.run_scheduled_job",
74 queue=self.get_queue_name(),
75 job_type=self.method,
76 job_id=self.rq_job_id,
77 )
78 return True
79 else:
80 frappe.logger("scheduler").error(
81 f"Skipped queueing {self.method} because it was found in queue for {frappe.local.site}"
82 )
83
84 return False
85
86 def is_event_due(self, current_time=None):
87 """Return true if event is due based on time lapsed since last execution"""
88 # if the next scheduled event is before NOW, then its due!
89 return self.get_next_execution() <= (current_time or now_datetime())
90
91 def is_job_in_queue(self) -> bool:
92 return is_job_enqueued(self.rq_job_id)
93
94 @property
95 def rq_job_id(self):
96 """Unique ID created to deduplicate jobs with single RQ call."""
97 return f"scheduled_job::{self.method}"
98
99 @property
100 def next_execution(self):
101 return self.get_next_execution()
102
103 def get_next_execution(self):
104 CRON_MAP = {
105 "Yearly": "0 0 1 1 *",
106 "Annual": "0 0 1 1 *",
107 "Monthly": "0 0 1 * *",
108 "Monthly Long": "0 0 1 * *",
109 "Weekly": "0 0 * * 0",
110 "Weekly Long": "0 0 * * 0",
111 "Daily": "0 0 * * *",
112 "Daily Long": "0 0 * * *",
113 "Hourly": "0 * * * *",
114 "Hourly Long": "0 * * * *",
115 "All": f"*/{(frappe.get_conf().scheduler_interval or 240) // 60} * * * *",
116 }
117
118 if not self.cron_format:
119 self.cron_format = CRON_MAP[self.frequency]
120
121 # If this is a cold start then last_execution will not be set.
122 # Creation is set as fallback because if very old fallback is set job might trigger
123 # immediately, even when it's meant to be daily.
124 # A dynamic fallback like current time might miss the scheduler interval and job will never start.
125 last_execution = get_datetime(self.last_execution or self.creation)
126 return croniter(self.cron_format, last_execution).get_next(datetime)
127
128 def execute(self):
129 self.scheduler_log = None
130 try:
131 self.log_status("Start")
132 if self.server_script:
133 script_name = frappe.db.get_value("Server Script", self.server_script)
134 if script_name:
135 frappe.get_doc("Server Script", script_name).execute_scheduled_method()
136 else:
137 frappe.get_attr(self.method)()
138 frappe.db.commit()
139 self.log_status("Complete")
140 except Exception:
141 frappe.db.rollback()
142 self.log_status("Failed")
143
144 def log_status(self, status):
145 # log file
146 frappe.logger("scheduler").info(f"Scheduled Job {status}: {self.method} for {frappe.local.site}")
147 self.update_scheduler_log(status)
148
149 def update_scheduler_log(self, status):
150 if not self.create_log:
151 # self.get_next_execution will work properly iff self.last_execution is properly set
152 if self.frequency == "All" and status == "Start":
153 self.db_set("last_execution", now_datetime(), update_modified=False)
154 frappe.db.commit()
155 return
156 if not self.scheduler_log:
157 self.scheduler_log = frappe.get_doc(
158 dict(doctype="Scheduled Job Log", scheduled_job_type=self.name)
159 ).insert(ignore_permissions=True)
160 self.scheduler_log.db_set("status", status)
161 if status == "Failed":
162 self.scheduler_log.db_set("details", frappe.get_traceback(with_context=True))
163 if status == "Start":
164 self.db_set("last_execution", now_datetime(), update_modified=False)
165 frappe.db.commit()
166
167 def get_queue_name(self):
168 return "long" if ("Long" in self.frequency) else "default"
169
170 def on_trash(self):
171 frappe.db.delete("Scheduled Job Log", {"scheduled_job_type": self.name})
172
173
174 @frappe.whitelist()
175 def execute_event(doc: str):
176 frappe.only_for("System Manager")
177 doc = json.loads(doc)
178 frappe.get_doc("Scheduled Job Type", doc.get("name")).enqueue(force=True)
179 return doc
180
181
182 def run_scheduled_job(job_type: str):
183 """This is a wrapper function that runs a hooks.scheduler_events method"""
184 try:
185 frappe.get_doc("Scheduled Job Type", dict(method=job_type)).execute()
186 except Exception:
187 print(frappe.get_traceback())
188
189
190 def sync_jobs(hooks: dict | None = None):
191 frappe.reload_doc("core", "doctype", "scheduled_job_type")
192 scheduler_events = hooks or frappe.get_hooks("scheduler_events")
193 all_events = insert_events(scheduler_events)
194 clear_events(all_events)
195
196
197 def insert_events(scheduler_events: dict) -> list:
198 cron_jobs, event_jobs = [], []
199 for event_type in scheduler_events:
200 events = scheduler_events.get(event_type)
201 if isinstance(events, dict):
202 cron_jobs += insert_cron_jobs(events)
203 else:
204 # hourly, daily etc
205 event_jobs += insert_event_jobs(events, event_type)
206 return cron_jobs + event_jobs
207
208
209 def insert_cron_jobs(events: dict) -> list:
210 cron_jobs = []
211 for cron_format in events:
212 for event in events.get(cron_format):
213 cron_jobs.append(event)
214 insert_single_event("Cron", event, cron_format)
215 return cron_jobs
216
217
218 def insert_event_jobs(events: list, event_type: str) -> list:
219 event_jobs = []
220 for event in events:
221 event_jobs.append(event)
222 frequency = event_type.replace("_", " ").title()
223 insert_single_event(frequency, event)
224 return event_jobs
225
226
227 def insert_single_event(frequency: str, event: str, cron_format: str | None = None):
228 cron_expr = {"cron_format": cron_format} if cron_format else {}
229
230 try:
231 frappe.get_attr(event)
232 except Exception as e:
233 click.secho(f"{event} is not a valid method: {e}", fg="yellow")
234
235 doc = frappe.get_doc(
236 {
237 "doctype": "Scheduled Job Type",
238 "method": event,
239 "cron_format": cron_format,
240 "frequency": frequency,
241 }
242 )
243
244 if not frappe.db.exists("Scheduled Job Type", {"method": event, "frequency": frequency, **cron_expr}):
245 savepoint = "scheduled_job_type_creation"
246 try:
247 frappe.db.savepoint(savepoint)
248 doc.insert()
249 except frappe.DuplicateEntryError:
250 frappe.db.rollback(save_point=savepoint)
251 doc.delete()
252 doc.insert()
253
254
255 def clear_events(all_events: list):
256 for event in frappe.get_all("Scheduled Job Type", fields=["name", "method", "server_script"]):
257 is_server_script = event.server_script
258 is_defined_in_hooks = event.method in all_events
259
260 if not (is_defined_in_hooks or is_server_script):
261 frappe.delete_doc("Scheduled Job Type", event.name)
262
[end of frappe/core/doctype/scheduled_job_type/scheduled_job_type.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py b/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py
--- a/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py
+++ b/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py
@@ -2,7 +2,8 @@
# License: MIT. See LICENSE
import json
-from datetime import datetime
+from datetime import datetime, timedelta
+from random import randint
import click
from croniter import CroniterBadCronError, croniter
@@ -123,7 +124,12 @@
# immediately, even when it's meant to be daily.
# A dynamic fallback like current time might miss the scheduler interval and job will never start.
last_execution = get_datetime(self.last_execution or self.creation)
- return croniter(self.cron_format, last_execution).get_next(datetime)
+ next_execution = croniter(self.cron_format, last_execution).get_next(datetime)
+
+ jitter = 0
+ if self.frequency in ("Hourly Long", "Daily Long"):
+ jitter = randint(1, 600)
+ return next_execution + timedelta(seconds=jitter)
def execute(self):
self.scheduler_log = None
|
{"golden_diff": "diff --git a/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py b/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py\n--- a/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py\n+++ b/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py\n@@ -2,7 +2,8 @@\n # License: MIT. See LICENSE\n \n import json\n-from datetime import datetime\n+from datetime import datetime, timedelta\n+from random import randint\n \n import click\n from croniter import CroniterBadCronError, croniter\n@@ -123,7 +124,12 @@\n \t\t# immediately, even when it's meant to be daily.\n \t\t# A dynamic fallback like current time might miss the scheduler interval and job will never start.\n \t\tlast_execution = get_datetime(self.last_execution or self.creation)\n-\t\treturn croniter(self.cron_format, last_execution).get_next(datetime)\n+\t\tnext_execution = croniter(self.cron_format, last_execution).get_next(datetime)\n+\n+\t\tjitter = 0\n+\t\tif self.frequency in (\"Hourly Long\", \"Daily Long\"):\n+\t\t\tjitter = randint(1, 600)\n+\t\treturn next_execution + timedelta(seconds=jitter)\n \n \tdef execute(self):\n \t\tself.scheduler_log = None\n", "issue": "Add jitter on scheduled jobs\nhourly, daily long processes if all started at once can cause sudden increase in workload if you have many sites/benches. \r\n\r\n\r\nAdding simple jitter to scheduled time can lessen the impact of such issues. Jitter is common pattern used for solving problems with \"frequency\" becomes a problem. E.g. gunicorn adds jitter to avoid restarting all workers at same time, profilers add jitter to avoid amplifying some pattern of repeated work. retry/backoff implementations also use to avoid creating patterns.\r\n\r\n\r\nPossible implementation: When importing scheduled job types add some random delays in cron. E.g. daily jobs will start in the range of 12:00-12:15 AM instead of all starting at 12:00 AM.\r\n\r\n\r\nCons: Some jobs are required to be executed at specific times e.g. birthday reminders. So adding negative offset can introduce bugs for them, positive offset however should be fine AFAIK. \n", "before_files": [{"content": "# Copyright (c) 2021, Frappe Technologies and contributors\n# License: MIT. See LICENSE\n\nimport json\nfrom datetime import datetime\n\nimport click\nfrom croniter import CroniterBadCronError, croniter\n\nimport frappe\nfrom frappe import _\nfrom frappe.model.document import Document\nfrom frappe.utils import get_datetime, now_datetime\nfrom frappe.utils.background_jobs import enqueue, is_job_enqueued\n\n\nclass ScheduledJobType(Document):\n\t# begin: auto-generated types\n\t# This code is auto-generated. 
Do not modify anything in this block.\n\n\tfrom typing import TYPE_CHECKING\n\n\tif TYPE_CHECKING:\n\t\tfrom frappe.types import DF\n\n\t\tcreate_log: DF.Check\n\t\tcron_format: DF.Data | None\n\t\tfrequency: DF.Literal[\n\t\t\t\"All\",\n\t\t\t\"Hourly\",\n\t\t\t\"Hourly Long\",\n\t\t\t\"Daily\",\n\t\t\t\"Daily Long\",\n\t\t\t\"Weekly\",\n\t\t\t\"Weekly Long\",\n\t\t\t\"Monthly\",\n\t\t\t\"Monthly Long\",\n\t\t\t\"Cron\",\n\t\t\t\"Yearly\",\n\t\t\t\"Annual\",\n\t\t]\n\t\tlast_execution: DF.Datetime | None\n\t\tmethod: DF.Data\n\t\tnext_execution: DF.Datetime | None\n\t\tserver_script: DF.Link | None\n\t\tstopped: DF.Check\n\n\t# end: auto-generated types\n\tdef autoname(self):\n\t\tself.name = \".\".join(self.method.split(\".\")[-2:])\n\n\tdef validate(self):\n\t\tif self.frequency != \"All\":\n\t\t\t# force logging for all events other than continuous ones (ALL)\n\t\t\tself.create_log = 1\n\n\t\tif self.frequency == \"Cron\":\n\t\t\tif not self.cron_format:\n\t\t\t\tfrappe.throw(_(\"Cron format is required for job types with Cron frequency.\"))\n\t\t\ttry:\n\t\t\t\tcroniter(self.cron_format)\n\t\t\texcept CroniterBadCronError:\n\t\t\t\tfrappe.throw(\n\t\t\t\t\t_(\"{0} is not a valid Cron expression.\").format(f\"<code>{self.cron_format}</code>\"),\n\t\t\t\t\ttitle=_(\"Bad Cron Expression\"),\n\t\t\t\t)\n\n\tdef enqueue(self, force=False) -> bool:\n\t\t# enqueue event if last execution is done\n\t\tif self.is_event_due() or force:\n\t\t\tif not self.is_job_in_queue():\n\t\t\t\tenqueue(\n\t\t\t\t\t\"frappe.core.doctype.scheduled_job_type.scheduled_job_type.run_scheduled_job\",\n\t\t\t\t\tqueue=self.get_queue_name(),\n\t\t\t\t\tjob_type=self.method,\n\t\t\t\t\tjob_id=self.rq_job_id,\n\t\t\t\t)\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tfrappe.logger(\"scheduler\").error(\n\t\t\t\t\tf\"Skipped queueing {self.method} because it was found in queue for {frappe.local.site}\"\n\t\t\t\t)\n\n\t\treturn False\n\n\tdef is_event_due(self, current_time=None):\n\t\t\"\"\"Return true if event is due based on time lapsed since last execution\"\"\"\n\t\t# if the next scheduled event is before NOW, then its due!\n\t\treturn self.get_next_execution() <= (current_time or now_datetime())\n\n\tdef is_job_in_queue(self) -> bool:\n\t\treturn is_job_enqueued(self.rq_job_id)\n\n\t@property\n\tdef rq_job_id(self):\n\t\t\"\"\"Unique ID created to deduplicate jobs with single RQ call.\"\"\"\n\t\treturn f\"scheduled_job::{self.method}\"\n\n\t@property\n\tdef next_execution(self):\n\t\treturn self.get_next_execution()\n\n\tdef get_next_execution(self):\n\t\tCRON_MAP = {\n\t\t\t\"Yearly\": \"0 0 1 1 *\",\n\t\t\t\"Annual\": \"0 0 1 1 *\",\n\t\t\t\"Monthly\": \"0 0 1 * *\",\n\t\t\t\"Monthly Long\": \"0 0 1 * *\",\n\t\t\t\"Weekly\": \"0 0 * * 0\",\n\t\t\t\"Weekly Long\": \"0 0 * * 0\",\n\t\t\t\"Daily\": \"0 0 * * *\",\n\t\t\t\"Daily Long\": \"0 0 * * *\",\n\t\t\t\"Hourly\": \"0 * * * *\",\n\t\t\t\"Hourly Long\": \"0 * * * *\",\n\t\t\t\"All\": f\"*/{(frappe.get_conf().scheduler_interval or 240) // 60} * * * *\",\n\t\t}\n\n\t\tif not self.cron_format:\n\t\t\tself.cron_format = CRON_MAP[self.frequency]\n\n\t\t# If this is a cold start then last_execution will not be set.\n\t\t# Creation is set as fallback because if very old fallback is set job might trigger\n\t\t# immediately, even when it's meant to be daily.\n\t\t# A dynamic fallback like current time might miss the scheduler interval and job will never start.\n\t\tlast_execution = get_datetime(self.last_execution or self.creation)\n\t\treturn croniter(self.cron_format, 
last_execution).get_next(datetime)\n\n\tdef execute(self):\n\t\tself.scheduler_log = None\n\t\ttry:\n\t\t\tself.log_status(\"Start\")\n\t\t\tif self.server_script:\n\t\t\t\tscript_name = frappe.db.get_value(\"Server Script\", self.server_script)\n\t\t\t\tif script_name:\n\t\t\t\t\tfrappe.get_doc(\"Server Script\", script_name).execute_scheduled_method()\n\t\t\telse:\n\t\t\t\tfrappe.get_attr(self.method)()\n\t\t\tfrappe.db.commit()\n\t\t\tself.log_status(\"Complete\")\n\t\texcept Exception:\n\t\t\tfrappe.db.rollback()\n\t\t\tself.log_status(\"Failed\")\n\n\tdef log_status(self, status):\n\t\t# log file\n\t\tfrappe.logger(\"scheduler\").info(f\"Scheduled Job {status}: {self.method} for {frappe.local.site}\")\n\t\tself.update_scheduler_log(status)\n\n\tdef update_scheduler_log(self, status):\n\t\tif not self.create_log:\n\t\t\t# self.get_next_execution will work properly iff self.last_execution is properly set\n\t\t\tif self.frequency == \"All\" and status == \"Start\":\n\t\t\t\tself.db_set(\"last_execution\", now_datetime(), update_modified=False)\n\t\t\t\tfrappe.db.commit()\n\t\t\treturn\n\t\tif not self.scheduler_log:\n\t\t\tself.scheduler_log = frappe.get_doc(\n\t\t\t\tdict(doctype=\"Scheduled Job Log\", scheduled_job_type=self.name)\n\t\t\t).insert(ignore_permissions=True)\n\t\tself.scheduler_log.db_set(\"status\", status)\n\t\tif status == \"Failed\":\n\t\t\tself.scheduler_log.db_set(\"details\", frappe.get_traceback(with_context=True))\n\t\tif status == \"Start\":\n\t\t\tself.db_set(\"last_execution\", now_datetime(), update_modified=False)\n\t\tfrappe.db.commit()\n\n\tdef get_queue_name(self):\n\t\treturn \"long\" if (\"Long\" in self.frequency) else \"default\"\n\n\tdef on_trash(self):\n\t\tfrappe.db.delete(\"Scheduled Job Log\", {\"scheduled_job_type\": self.name})\n\n\[email protected]()\ndef execute_event(doc: str):\n\tfrappe.only_for(\"System Manager\")\n\tdoc = json.loads(doc)\n\tfrappe.get_doc(\"Scheduled Job Type\", doc.get(\"name\")).enqueue(force=True)\n\treturn doc\n\n\ndef run_scheduled_job(job_type: str):\n\t\"\"\"This is a wrapper function that runs a hooks.scheduler_events method\"\"\"\n\ttry:\n\t\tfrappe.get_doc(\"Scheduled Job Type\", dict(method=job_type)).execute()\n\texcept Exception:\n\t\tprint(frappe.get_traceback())\n\n\ndef sync_jobs(hooks: dict | None = None):\n\tfrappe.reload_doc(\"core\", \"doctype\", \"scheduled_job_type\")\n\tscheduler_events = hooks or frappe.get_hooks(\"scheduler_events\")\n\tall_events = insert_events(scheduler_events)\n\tclear_events(all_events)\n\n\ndef insert_events(scheduler_events: dict) -> list:\n\tcron_jobs, event_jobs = [], []\n\tfor event_type in scheduler_events:\n\t\tevents = scheduler_events.get(event_type)\n\t\tif isinstance(events, dict):\n\t\t\tcron_jobs += insert_cron_jobs(events)\n\t\telse:\n\t\t\t# hourly, daily etc\n\t\t\tevent_jobs += insert_event_jobs(events, event_type)\n\treturn cron_jobs + event_jobs\n\n\ndef insert_cron_jobs(events: dict) -> list:\n\tcron_jobs = []\n\tfor cron_format in events:\n\t\tfor event in events.get(cron_format):\n\t\t\tcron_jobs.append(event)\n\t\t\tinsert_single_event(\"Cron\", event, cron_format)\n\treturn cron_jobs\n\n\ndef insert_event_jobs(events: list, event_type: str) -> list:\n\tevent_jobs = []\n\tfor event in events:\n\t\tevent_jobs.append(event)\n\t\tfrequency = event_type.replace(\"_\", \" \").title()\n\t\tinsert_single_event(frequency, event)\n\treturn event_jobs\n\n\ndef insert_single_event(frequency: str, event: str, cron_format: str | None = None):\n\tcron_expr = 
{\"cron_format\": cron_format} if cron_format else {}\n\n\ttry:\n\t\tfrappe.get_attr(event)\n\texcept Exception as e:\n\t\tclick.secho(f\"{event} is not a valid method: {e}\", fg=\"yellow\")\n\n\tdoc = frappe.get_doc(\n\t\t{\n\t\t\t\"doctype\": \"Scheduled Job Type\",\n\t\t\t\"method\": event,\n\t\t\t\"cron_format\": cron_format,\n\t\t\t\"frequency\": frequency,\n\t\t}\n\t)\n\n\tif not frappe.db.exists(\"Scheduled Job Type\", {\"method\": event, \"frequency\": frequency, **cron_expr}):\n\t\tsavepoint = \"scheduled_job_type_creation\"\n\t\ttry:\n\t\t\tfrappe.db.savepoint(savepoint)\n\t\t\tdoc.insert()\n\t\texcept frappe.DuplicateEntryError:\n\t\t\tfrappe.db.rollback(save_point=savepoint)\n\t\t\tdoc.delete()\n\t\t\tdoc.insert()\n\n\ndef clear_events(all_events: list):\n\tfor event in frappe.get_all(\"Scheduled Job Type\", fields=[\"name\", \"method\", \"server_script\"]):\n\t\tis_server_script = event.server_script\n\t\tis_defined_in_hooks = event.method in all_events\n\n\t\tif not (is_defined_in_hooks or is_server_script):\n\t\t\tfrappe.delete_doc(\"Scheduled Job Type\", event.name)\n", "path": "frappe/core/doctype/scheduled_job_type/scheduled_job_type.py"}]}
| 3,576 | 285 |
gh_patches_debug_54050
|
rasdani/github-patches
|
git_diff
|
python-discord__bot-1404
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add an `attachments` rule
# Abstract
We should have an antispam rule filtering small bursts of images.
# Rationale
Currently, when a user posts 4 images in less than 10 seconds without any comment, the `duplicates` rule will trigger. While we still want to be informed when many images are posted, having the `duplicates` rule trigger doesn't make much sense. Besides, if different message content is given for each image, it will only trigger `burst` if more than 9 messages are sent in 10 seconds.
# Specification
- [ ] Make sure that the `duplicates` filter won't be triggered by messages with images. We can safely skip empty messages with attachments.
- [ ] Create an `images` filter based on `duplicates` that will trigger when more than 3 images are posted in less than 10 seconds. It should ignore the message content.
</issue>
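For illustration, a minimal stand-alone sketch of what an `attachments`-style rule from the specification above could look like, modeled on the `duplicates` rule listed below; the `max`/`interval` config keys follow the same convention, and the exact threshold handling is an assumption for this example rather than the bot's final implementation.
```python
from typing import Dict, Iterable, List, Optional, Tuple

from discord import Member, Message


async def apply(
    last_message: Message, recent_messages: List[Message], config: Dict[str, int]
) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:
    """Detects repeated attachment posts by a single user, ignoring message content."""
    relevant_messages = tuple(
        msg
        for msg in recent_messages
        if msg.author == last_message.author and len(msg.attachments) > 0
    )
    total_attachments = sum(len(msg.attachments) for msg in relevant_messages)

    if total_attachments > config['max']:
        return (
            f"sent {total_attachments} attachments in {config['interval']}s",
            (last_message.author,),
            relevant_messages
        )
    return None
```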
<code>
[start of bot/rules/duplicates.py]
1 from typing import Dict, Iterable, List, Optional, Tuple
2
3 from discord import Member, Message
4
5
6 async def apply(
7 last_message: Message, recent_messages: List[Message], config: Dict[str, int]
8 ) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:
9 """Detects duplicated messages sent by a single user."""
10 relevant_messages = tuple(
11 msg
12 for msg in recent_messages
13 if (
14 msg.author == last_message.author
15 and msg.content == last_message.content
16 )
17 )
18
19 total_duplicated = len(relevant_messages)
20
21 if total_duplicated > config['max']:
22 return (
23 f"sent {total_duplicated} duplicated messages in {config['interval']}s",
24 (last_message.author,),
25 relevant_messages
26 )
27 return None
28
[end of bot/rules/duplicates.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bot/rules/duplicates.py b/bot/rules/duplicates.py
--- a/bot/rules/duplicates.py
+++ b/bot/rules/duplicates.py
@@ -13,6 +13,7 @@
if (
msg.author == last_message.author
and msg.content == last_message.content
+ and msg.content
)
)
|
{"golden_diff": "diff --git a/bot/rules/duplicates.py b/bot/rules/duplicates.py\n--- a/bot/rules/duplicates.py\n+++ b/bot/rules/duplicates.py\n@@ -13,6 +13,7 @@\n if (\n msg.author == last_message.author\n and msg.content == last_message.content\n+ and msg.content\n )\n )\n", "issue": "Add an `attachments` rule\n# Abstract \r\n\r\nWe should have an antispam rule filtering small burst of images. \r\n\r\n# Rationale\r\n\r\nCurrently, when a user posts 4 images in less than 10 seconds without any comment, the `duplicates` rule will trigger. While we still want to be informed when many images are posted, having the `duplicates` rule trigger doesn't make much sense. Besides, if different message content is given for each image, it will only trigger `burst` if more than 9 messages are sent in 10 seconds. \r\n\r\n# Specification\r\n\r\n- [ ] Make sure that the `duplicates` filter won't be triggered by messages with images. We can safely skip empty messages with attachments.\r\n- [ ] Create an `images` filter based on `duplicates` that will trigger when more than 3 images are posted in less than 10 seconds. It should ignore the message content. \n", "before_files": [{"content": "from typing import Dict, Iterable, List, Optional, Tuple\n\nfrom discord import Member, Message\n\n\nasync def apply(\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n \"\"\"Detects duplicated messages sent by a single user.\"\"\"\n relevant_messages = tuple(\n msg\n for msg in recent_messages\n if (\n msg.author == last_message.author\n and msg.content == last_message.content\n )\n )\n\n total_duplicated = len(relevant_messages)\n\n if total_duplicated > config['max']:\n return (\n f\"sent {total_duplicated} duplicated messages in {config['interval']}s\",\n (last_message.author,),\n relevant_messages\n )\n return None\n", "path": "bot/rules/duplicates.py"}]}
| 951 | 78 |
gh_patches_debug_36957
|
rasdani/github-patches
|
git_diff
|
ansible__ansible-12267
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add "<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>" replacer for non-Python modules.
In v2, the module-arguments file is no longer passed to any module, even if the module style is `non_native_want_json` or `old`.
So, we have to embed the `<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>` replacer in non-Python (and non-PS1) modules to pass the module arguments.
The name of that replacer is somewhat confusing in this context, so adding a new replacer, `<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>`, as an alias of `<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>`, would be helpful.
</issue>
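For illustration, a stand-alone sketch of how the proposed `<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>` replacer from the issue above could be substituted into a non-Python module; the helper name and the toy bash module are assumptions for this example, not Ansible's actual implementation (the file where such a change would land, module_common.py, is listed below).
```python
import json

REPLACER_JSONARGS = "<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"


def embed_json_args(module_source: str, module_args: dict) -> str:
    """Replace the JSON-args marker with the serialized module arguments."""
    return module_source.replace(REPLACER_JSONARGS, json.dumps(module_args))


# A toy non-Python module (bash) that expects its arguments inlined as JSON:
module_source = """#!/bin/bash
json_args='<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>'
echo "$json_args"
"""
print(embed_json_args(module_source, {"name": "demo", "state": "present"}))
```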
<code>
[start of lib/ansible/executor/module_common.py]
1 # (c) 2013-2014, Michael DeHaan <[email protected]>
2 # (c) 2015 Toshio Kuratomi <[email protected]>
3 #
4 # This file is part of Ansible
5 #
6 # Ansible is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
10 #
11 # Ansible is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
18
19 # Make coding more python3-ish
20 from __future__ import (absolute_import, division, print_function)
21 __metaclass__ = type
22
23 # from python and deps
24 from six.moves import StringIO
25 import json
26 import os
27 import shlex
28
29 # from Ansible
30 from ansible import __version__
31 from ansible import constants as C
32 from ansible.errors import AnsibleError
33 from ansible.parsing.utils.jsonify import jsonify
34 from ansible.utils.unicode import to_bytes
35
36 REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
37 REPLACER_ARGS = "\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\""
38 REPLACER_COMPLEX = "\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
39 REPLACER_WINDOWS = "# POWERSHELL_COMMON"
40 REPLACER_WINARGS = "<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>"
41 REPLACER_VERSION = "\"<<ANSIBLE_VERSION>>\""
42
43 # We could end up writing out parameters with unicode characters so we need to
44 # specify an encoding for the python source file
45 ENCODING_STRING = '# -*- coding: utf-8 -*-'
46
47 # we've moved the module_common relative to the snippets, so fix the path
48 _SNIPPET_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
49
50 # ******************************************************************************
51
52 def _slurp(path):
53 if not os.path.exists(path):
54 raise AnsibleError("imported module support code does not exist at %s" % path)
55 fd = open(path)
56 data = fd.read()
57 fd.close()
58 return data
59
60 def _find_snippet_imports(module_data, module_path, strip_comments):
61 """
62 Given the source of the module, convert it to a Jinja2 template to insert
63 module code and return whether it's a new or old style module.
64 """
65
66 module_style = 'old'
67 if REPLACER in module_data:
68 module_style = 'new'
69 elif REPLACER_WINDOWS in module_data:
70 module_style = 'new'
71 elif 'from ansible.module_utils.' in module_data:
72 module_style = 'new'
73 elif 'WANT_JSON' in module_data:
74 module_style = 'non_native_want_json'
75
76 output = StringIO()
77 lines = module_data.split('\n')
78 snippet_names = []
79
80 for line in lines:
81
82 if REPLACER in line:
83 output.write(_slurp(os.path.join(_SNIPPET_PATH, "basic.py")))
84 snippet_names.append('basic')
85 if REPLACER_WINDOWS in line:
86 ps_data = _slurp(os.path.join(_SNIPPET_PATH, "powershell.ps1"))
87 output.write(ps_data)
88 snippet_names.append('powershell')
89 elif line.startswith('from ansible.module_utils.'):
90 tokens=line.split(".")
91 import_error = False
92 if len(tokens) != 3:
93 import_error = True
94 if " import *" not in line:
95 import_error = True
96 if import_error:
97 raise AnsibleError("error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'" % module_path)
98 snippet_name = tokens[2].split()[0]
99 snippet_names.append(snippet_name)
100 output.write(_slurp(os.path.join(_SNIPPET_PATH, snippet_name + ".py")))
101 else:
102 if strip_comments and line.startswith("#") or line == '':
103 pass
104 output.write(line)
105 output.write("\n")
106
107 if not module_path.endswith(".ps1"):
108 # Unixy modules
109 if len(snippet_names) > 0 and not 'basic' in snippet_names:
110 raise AnsibleError("missing required import in %s: from ansible.module_utils.basic import *" % module_path)
111 else:
112 # Windows modules
113 if len(snippet_names) > 0 and not 'powershell' in snippet_names:
114 raise AnsibleError("missing required import in %s: # POWERSHELL_COMMON" % module_path)
115
116 return (output.getvalue(), module_style)
117
118 # ******************************************************************************
119
120 def modify_module(module_path, module_args, task_vars=dict(), strip_comments=False):
121 """
122 Used to insert chunks of code into modules before transfer rather than
123 doing regular python imports. This allows for more efficient transfer in
124 a non-bootstrapping scenario by not moving extra files over the wire and
125 also takes care of embedding arguments in the transferred modules.
126
127 This version is done in such a way that local imports can still be
128 used in the module code, so IDEs don't have to be aware of what is going on.
129
130 Example:
131
132 from ansible.module_utils.basic import *
133
134 ... will result in the insertion of basic.py into the module
135 from the module_utils/ directory in the source tree.
136
137 All modules are required to import at least basic, though there will also
138 be other snippets.
139
140 For powershell, there's equivalent conventions like this:
141
142 # POWERSHELL_COMMON
143
144 which results in the inclusion of the common code from powershell.ps1
145
146 """
147 ### TODO: Optimization ideas if this code is actually a source of slowness:
148 # * Fix comment stripping: Currently doesn't preserve shebangs and encoding info (but we unconditionally add encoding info)
149 # * Use pyminifier if installed
150 # * comment stripping/pyminifier needs to have config setting to turn it
151 # off for debugging purposes (goes along with keep remote but should be
152 # separate otherwise users wouldn't be able to get info on what the
153 # minifier output)
154 # * Only split into lines and recombine into strings once
155 # * Cache the modified module? If only the args are different and we do
156 # that as the last step we could cache sll the work up to that point.
157
158 with open(module_path) as f:
159
160 # read in the module source
161 module_data = f.read()
162
163 (module_data, module_style) = _find_snippet_imports(module_data, module_path, strip_comments)
164
165 module_args_json = json.dumps(module_args)
166 encoded_args = repr(module_args_json.encode('utf-8'))
167
168 # these strings should be part of the 'basic' snippet which is required to be included
169 module_data = module_data.replace(REPLACER_VERSION, repr(__version__))
170 module_data = module_data.replace(REPLACER_COMPLEX, encoded_args)
171 module_data = module_data.replace(REPLACER_WINARGS, module_args_json.encode('utf-8'))
172
173 if module_style == 'new':
174 facility = C.DEFAULT_SYSLOG_FACILITY
175 if 'ansible_syslog_facility' in task_vars:
176 facility = task_vars['ansible_syslog_facility']
177 module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)
178
179 lines = module_data.split(b"\n", 1)
180 shebang = None
181 if lines[0].startswith(b"#!"):
182 shebang = lines[0].strip()
183 args = shlex.split(str(shebang[2:]))
184 interpreter = args[0]
185 interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)
186
187 if interpreter_config in task_vars:
188 interpreter = to_bytes(task_vars[interpreter_config], errors='strict')
189 lines[0] = shebang = b"#!{0} {1}".format(interpreter, b" ".join(args[1:]))
190
191 if interpreter.startswith('python'):
192 lines.insert(1, ENCODING_STRING)
193 else:
194 # No shebang, assume a binary module?
195 pass
196
197 module_data = b"\n".join(lines)
198
199 return (module_data, module_style, shebang)
200
201
[end of lib/ansible/executor/module_common.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lib/ansible/executor/module_common.py b/lib/ansible/executor/module_common.py
--- a/lib/ansible/executor/module_common.py
+++ b/lib/ansible/executor/module_common.py
@@ -33,12 +33,13 @@
from ansible.parsing.utils.jsonify import jsonify
from ansible.utils.unicode import to_bytes
-REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
-REPLACER_ARGS = "\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\""
-REPLACER_COMPLEX = "\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
-REPLACER_WINDOWS = "# POWERSHELL_COMMON"
-REPLACER_WINARGS = "<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>"
-REPLACER_VERSION = "\"<<ANSIBLE_VERSION>>\""
+REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
+REPLACER_ARGS = "\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\""
+REPLACER_COMPLEX = "\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
+REPLACER_WINDOWS = "# POWERSHELL_COMMON"
+REPLACER_WINARGS = "<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>"
+REPLACER_JSONARGS = "<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
+REPLACER_VERSION = "\"<<ANSIBLE_VERSION>>\""
# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
@@ -68,6 +69,8 @@
module_style = 'new'
elif REPLACER_WINDOWS in module_data:
module_style = 'new'
+ elif REPLACER_JSONARGS in module_data:
+ module_style = 'new'
elif 'from ansible.module_utils.' in module_data:
module_style = 'new'
elif 'WANT_JSON' in module_data:
@@ -162,13 +165,14 @@
(module_data, module_style) = _find_snippet_imports(module_data, module_path, strip_comments)
- module_args_json = json.dumps(module_args)
- encoded_args = repr(module_args_json.encode('utf-8'))
+ module_args_json = json.dumps(module_args).encode('utf-8')
+ python_repred_args = repr(module_args_json)
# these strings should be part of the 'basic' snippet which is required to be included
module_data = module_data.replace(REPLACER_VERSION, repr(__version__))
- module_data = module_data.replace(REPLACER_COMPLEX, encoded_args)
- module_data = module_data.replace(REPLACER_WINARGS, module_args_json.encode('utf-8'))
+ module_data = module_data.replace(REPLACER_COMPLEX, python_repred_args)
+ module_data = module_data.replace(REPLACER_WINARGS, module_args_json)
+ module_data = module_data.replace(REPLACER_JSONARGS, module_args_json)
if module_style == 'new':
facility = C.DEFAULT_SYSLOG_FACILITY
|
{"golden_diff": "diff --git a/lib/ansible/executor/module_common.py b/lib/ansible/executor/module_common.py\n--- a/lib/ansible/executor/module_common.py\n+++ b/lib/ansible/executor/module_common.py\n@@ -33,12 +33,13 @@\n from ansible.parsing.utils.jsonify import jsonify\n from ansible.utils.unicode import to_bytes\n \n-REPLACER = \"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>\"\n-REPLACER_ARGS = \"\\\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\\\"\"\n-REPLACER_COMPLEX = \"\\\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\\\"\"\n-REPLACER_WINDOWS = \"# POWERSHELL_COMMON\"\n-REPLACER_WINARGS = \"<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>\"\n-REPLACER_VERSION = \"\\\"<<ANSIBLE_VERSION>>\\\"\"\n+REPLACER = \"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>\"\n+REPLACER_ARGS = \"\\\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\\\"\"\n+REPLACER_COMPLEX = \"\\\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\\\"\"\n+REPLACER_WINDOWS = \"# POWERSHELL_COMMON\"\n+REPLACER_WINARGS = \"<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>\"\n+REPLACER_JSONARGS = \"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>\"\n+REPLACER_VERSION = \"\\\"<<ANSIBLE_VERSION>>\\\"\"\n \n # We could end up writing out parameters with unicode characters so we need to\n # specify an encoding for the python source file\n@@ -68,6 +69,8 @@\n module_style = 'new'\n elif REPLACER_WINDOWS in module_data:\n module_style = 'new'\n+ elif REPLACER_JSONARGS in module_data:\n+ module_style = 'new'\n elif 'from ansible.module_utils.' in module_data:\n module_style = 'new'\n elif 'WANT_JSON' in module_data:\n@@ -162,13 +165,14 @@\n \n (module_data, module_style) = _find_snippet_imports(module_data, module_path, strip_comments)\n \n- module_args_json = json.dumps(module_args)\n- encoded_args = repr(module_args_json.encode('utf-8'))\n+ module_args_json = json.dumps(module_args).encode('utf-8')\n+ python_repred_args = repr(module_args_json)\n \n # these strings should be part of the 'basic' snippet which is required to be included\n module_data = module_data.replace(REPLACER_VERSION, repr(__version__))\n- module_data = module_data.replace(REPLACER_COMPLEX, encoded_args)\n- module_data = module_data.replace(REPLACER_WINARGS, module_args_json.encode('utf-8'))\n+ module_data = module_data.replace(REPLACER_COMPLEX, python_repred_args)\n+ module_data = module_data.replace(REPLACER_WINARGS, module_args_json)\n+ module_data = module_data.replace(REPLACER_JSONARGS, module_args_json)\n \n if module_style == 'new':\n facility = C.DEFAULT_SYSLOG_FACILITY\n", "issue": "Add \"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>\" replacer for non-Python modules.\nIn v2, the module-arguments file are no more passed to any modules, even if the style of the modules are `non_native_want_json` or `old`.\nSo, we have to embed `<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>` replacer in non-Python (nor PS1) modules to pass module-arguments.\nIt seems the name of the replacer is some kind of confusing, and adding the new replacer `<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>`, the alias of `<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>`, would be helpful.\n\n", "before_files": [{"content": "# (c) 2013-2014, Michael DeHaan <[email protected]>\n# (c) 2015 Toshio Kuratomi <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the 
implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\n# from python and deps\nfrom six.moves import StringIO\nimport json\nimport os\nimport shlex\n\n# from Ansible\nfrom ansible import __version__\nfrom ansible import constants as C\nfrom ansible.errors import AnsibleError\nfrom ansible.parsing.utils.jsonify import jsonify\nfrom ansible.utils.unicode import to_bytes\n\nREPLACER = \"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>\"\nREPLACER_ARGS = \"\\\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\\\"\"\nREPLACER_COMPLEX = \"\\\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\\\"\"\nREPLACER_WINDOWS = \"# POWERSHELL_COMMON\"\nREPLACER_WINARGS = \"<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>\"\nREPLACER_VERSION = \"\\\"<<ANSIBLE_VERSION>>\\\"\"\n\n# We could end up writing out parameters with unicode characters so we need to\n# specify an encoding for the python source file\nENCODING_STRING = '# -*- coding: utf-8 -*-'\n\n# we've moved the module_common relative to the snippets, so fix the path\n_SNIPPET_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')\n\n# ******************************************************************************\n\ndef _slurp(path):\n if not os.path.exists(path):\n raise AnsibleError(\"imported module support code does not exist at %s\" % path)\n fd = open(path)\n data = fd.read()\n fd.close()\n return data\n\ndef _find_snippet_imports(module_data, module_path, strip_comments):\n \"\"\"\n Given the source of the module, convert it to a Jinja2 template to insert\n module code and return whether it's a new or old style module.\n \"\"\"\n\n module_style = 'old'\n if REPLACER in module_data:\n module_style = 'new'\n elif REPLACER_WINDOWS in module_data:\n module_style = 'new'\n elif 'from ansible.module_utils.' 
in module_data:\n module_style = 'new'\n elif 'WANT_JSON' in module_data:\n module_style = 'non_native_want_json'\n\n output = StringIO()\n lines = module_data.split('\\n')\n snippet_names = []\n\n for line in lines:\n\n if REPLACER in line:\n output.write(_slurp(os.path.join(_SNIPPET_PATH, \"basic.py\")))\n snippet_names.append('basic')\n if REPLACER_WINDOWS in line:\n ps_data = _slurp(os.path.join(_SNIPPET_PATH, \"powershell.ps1\"))\n output.write(ps_data)\n snippet_names.append('powershell')\n elif line.startswith('from ansible.module_utils.'):\n tokens=line.split(\".\")\n import_error = False\n if len(tokens) != 3:\n import_error = True\n if \" import *\" not in line:\n import_error = True\n if import_error:\n raise AnsibleError(\"error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'\" % module_path)\n snippet_name = tokens[2].split()[0]\n snippet_names.append(snippet_name)\n output.write(_slurp(os.path.join(_SNIPPET_PATH, snippet_name + \".py\")))\n else:\n if strip_comments and line.startswith(\"#\") or line == '':\n pass\n output.write(line)\n output.write(\"\\n\")\n\n if not module_path.endswith(\".ps1\"):\n # Unixy modules\n if len(snippet_names) > 0 and not 'basic' in snippet_names:\n raise AnsibleError(\"missing required import in %s: from ansible.module_utils.basic import *\" % module_path)\n else:\n # Windows modules\n if len(snippet_names) > 0 and not 'powershell' in snippet_names:\n raise AnsibleError(\"missing required import in %s: # POWERSHELL_COMMON\" % module_path)\n\n return (output.getvalue(), module_style)\n\n# ******************************************************************************\n\ndef modify_module(module_path, module_args, task_vars=dict(), strip_comments=False):\n \"\"\"\n Used to insert chunks of code into modules before transfer rather than\n doing regular python imports. This allows for more efficient transfer in\n a non-bootstrapping scenario by not moving extra files over the wire and\n also takes care of embedding arguments in the transferred modules.\n\n This version is done in such a way that local imports can still be\n used in the module code, so IDEs don't have to be aware of what is going on.\n\n Example:\n\n from ansible.module_utils.basic import *\n\n ... will result in the insertion of basic.py into the module\n from the module_utils/ directory in the source tree.\n\n All modules are required to import at least basic, though there will also\n be other snippets.\n\n For powershell, there's equivalent conventions like this:\n\n # POWERSHELL_COMMON\n\n which results in the inclusion of the common code from powershell.ps1\n\n \"\"\"\n ### TODO: Optimization ideas if this code is actually a source of slowness:\n # * Fix comment stripping: Currently doesn't preserve shebangs and encoding info (but we unconditionally add encoding info)\n # * Use pyminifier if installed\n # * comment stripping/pyminifier needs to have config setting to turn it\n # off for debugging purposes (goes along with keep remote but should be\n # separate otherwise users wouldn't be able to get info on what the\n # minifier output)\n # * Only split into lines and recombine into strings once\n # * Cache the modified module? 
If only the args are different and we do\n # that as the last step we could cache sll the work up to that point.\n\n with open(module_path) as f:\n\n # read in the module source\n module_data = f.read()\n\n (module_data, module_style) = _find_snippet_imports(module_data, module_path, strip_comments)\n\n module_args_json = json.dumps(module_args)\n encoded_args = repr(module_args_json.encode('utf-8'))\n\n # these strings should be part of the 'basic' snippet which is required to be included\n module_data = module_data.replace(REPLACER_VERSION, repr(__version__))\n module_data = module_data.replace(REPLACER_COMPLEX, encoded_args)\n module_data = module_data.replace(REPLACER_WINARGS, module_args_json.encode('utf-8'))\n\n if module_style == 'new':\n facility = C.DEFAULT_SYSLOG_FACILITY\n if 'ansible_syslog_facility' in task_vars:\n facility = task_vars['ansible_syslog_facility']\n module_data = module_data.replace('syslog.LOG_USER', \"syslog.%s\" % facility)\n\n lines = module_data.split(b\"\\n\", 1)\n shebang = None\n if lines[0].startswith(b\"#!\"):\n shebang = lines[0].strip()\n args = shlex.split(str(shebang[2:]))\n interpreter = args[0]\n interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)\n\n if interpreter_config in task_vars:\n interpreter = to_bytes(task_vars[interpreter_config], errors='strict')\n lines[0] = shebang = b\"#!{0} {1}\".format(interpreter, b\" \".join(args[1:]))\n\n if interpreter.startswith('python'):\n lines.insert(1, ENCODING_STRING)\n else:\n # No shebang, assume a binary module?\n pass\n\n module_data = b\"\\n\".join(lines)\n\n return (module_data, module_style, shebang)\n\n", "path": "lib/ansible/executor/module_common.py"}]}
| 3,029 | 658 |
gh_patches_debug_21398
|
rasdani/github-patches
|
git_diff
|
vispy__vispy-751
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
vispy.plot.image fails on float64 textures
</issue>
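A minimal sketch of the likely trigger, assuming the image is a plain NumPy array (the plotting call itself is omitted): NumPy creates float64 arrays by default, while GPU texture formats typically top out at 32-bit floats, so casting before upload sidesteps the failure.

```python
import numpy as np

img = np.random.rand(256, 256)      # NumPy defaults to float64
assert img.dtype == np.float64

img32 = img.astype(np.float32)      # a dtype the texture upload can handle
assert img32.dtype == np.float32
```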
<code>
[start of vispy/visuals/image.py]
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2014, Vispy Development Team.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
4
5 from __future__ import division
6
7 import numpy as np
8
9 from .. import gloo
10 from .transforms import STTransform, NullTransform
11 from .modular_mesh import ModularMesh
12 from .components import (TextureComponent, VertexTextureCoordinateComponent,
13 TextureCoordinateComponent)
14
15
16 class ImageVisual(ModularMesh):
17 """Visual subclass displaying an image.
18
19 Parameters
20 ----------
21 data : (height, width, 4) ubyte array
22 ImageVisual data.
23 method : str
24 Selects method of rendering image in case of non-linear transforms.
25 Each method produces similar results, but may trade efficiency
26 and accuracy. If the transform is linear, this parameter is ignored
27 and a single quad is drawn around the area of the image.
28
29 * 'subdivide': ImageVisual is represented as a grid of triangles
30 with texture coordinates linearly mapped.
31 * 'impostor': ImageVisual is represented as a quad covering the
32 entire view, with texture coordinates determined by the
33 transform. This produces the best transformation results, but may
34 be slow.
35
36 grid: tuple (rows, cols)
37 If method='subdivide', this tuple determines the number of rows and
38 columns in the image grid.
39 """
40 def __init__(self, data, method='subdivide', grid=(10, 10), **kwargs):
41 super(ImageVisual, self).__init__(**kwargs)
42
43 self._data = None
44
45 # maps from quad coordinates to texture coordinates
46 self._tex_transform = STTransform()
47
48 self._texture = None
49 self._interpolation = 'nearest'
50 self.set_data(data)
51 self.set_gl_options(cull_face=('front_and_back',))
52
53 self.method = method
54 self.grid = grid
55
56 def set_data(self, image=None, **kwds):
57 if image is not None:
58 self._data = image
59 self._texture = None
60 super(ImageVisual, self).set_data(**kwds)
61
62 @property
63 def interpolation(self):
64 return self._interpolation
65
66 @interpolation.setter
67 def interpolation(self, interp):
68 self._interpolation = interp
69 self.update()
70
71 @property
72 def size(self):
73 return self._data.shape[:2][::-1]
74
75 def _build_data(self, transforms):
76 # Construct complete data array with position and optionally color
77 if transforms.get_full_transform().Linear:
78 method = 'subdivide'
79 grid = (1, 1)
80 else:
81 method = self.method
82 grid = self.grid
83
84 # TODO: subdivision and impostor modes should be handled by new
85 # components?
86 if method == 'subdivide':
87 # quads cover area of image as closely as possible
88 w = 1.0 / grid[1]
89 h = 1.0 / grid[0]
90
91 quad = np.array([[0, 0, 0], [w, 0, 0], [w, h, 0],
92 [0, 0, 0], [w, h, 0], [0, h, 0]],
93 dtype=np.float32)
94 quads = np.empty((grid[1], grid[0], 6, 3), dtype=np.float32)
95 quads[:] = quad
96
97 mgrid = np.mgrid[0.:grid[1], 0.:grid[0]].transpose(1, 2, 0)
98 mgrid = mgrid[:, :, np.newaxis, :]
99 mgrid[..., 0] *= w
100 mgrid[..., 1] *= h
101
102 quads[..., :2] += mgrid
103 tex_coords = quads.reshape(grid[1]*grid[0]*6, 3)
104 vertices = tex_coords.copy()
105 vertices[..., 0] *= self._data.shape[1]
106 vertices[..., 1] *= self._data.shape[0]
107 ModularMesh.set_data(self, pos=vertices)
108 coords = np.ascontiguousarray(tex_coords[:, :2])
109 tex_coord_comp = TextureCoordinateComponent(coords)
110 elif method == 'impostor':
111 # quad covers entire view; frag. shader will deal with image shape
112 quad = np.array([[-1, -1, 0], [1, -1, 0], [1, 1, 0],
113 [-1, -1, 0], [1, 1, 0], [-1, 1, 0]],
114 dtype=np.float32)
115 ModularMesh.set_data(self, pos=quad)
116
117 self._tex_transform.scale = (1./self._data.shape[0],
118 1./self._data.shape[1])
119 ctr = transforms.get_full_transform().inverse
120 total_transform = self._tex_transform * ctr
121 tex_coord_comp = VertexTextureCoordinateComponent(total_transform)
122 tr = NullTransform()
123 self._program.vert['map_local_to_nd'] = tr
124 else:
125 raise ValueError("Unknown image draw method '%s'" % method)
126
127 data = self._data
128 self._texture = gloo.Texture2D(data)
129 self._texture.interpolation = self._interpolation
130
131 self.color_components = [TextureComponent(self._texture,
132 tex_coord_comp)]
133
134 def _activate_transform(self, transforms=None):
135 # this is handled in _build_data instead.
136 pass
137
138 def bounds(self, mode, axis):
139 if axis > 1:
140 return (0, 0)
141 else:
142 return (0, self.size[axis])
143
144 def draw(self, transforms):
145 if self._data is None:
146 return
147
148 if transforms.get_full_transform().Linear:
149 method = 'subdivide'
150 else:
151 method = self.method
152
153 # always have to rebuild for impostor, only first for subdivide
154 if self._texture is None:
155 self._build_data(transforms)
156 if method == 'subdivide':
157 tr = transforms.get_full_transform()
158 self._program.vert['map_local_to_nd'] = tr
159
160 super(ImageVisual, self).draw(transforms)
161
[end of vispy/visuals/image.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/vispy/visuals/image.py b/vispy/visuals/image.py
--- a/vispy/visuals/image.py
+++ b/vispy/visuals/image.py
@@ -28,8 +28,8 @@
* 'subdivide': ImageVisual is represented as a grid of triangles
with texture coordinates linearly mapped.
- * 'impostor': ImageVisual is represented as a quad covering the
- entire view, with texture coordinates determined by the
+ * 'impostor': ImageVisual is represented as a quad covering the
+ entire view, with texture coordinates determined by the
transform. This produces the best transformation results, but may
be slow.
@@ -55,6 +55,9 @@
def set_data(self, image=None, **kwds):
if image is not None:
+ image = np.array(image, copy=False)
+ if image.dtype == np.float64:
+ image = image.astype(np.float32)
self._data = image
self._texture = None
super(ImageVisual, self).set_data(**kwds)
|
{"golden_diff": "diff --git a/vispy/visuals/image.py b/vispy/visuals/image.py\n--- a/vispy/visuals/image.py\n+++ b/vispy/visuals/image.py\n@@ -28,8 +28,8 @@\n \n * 'subdivide': ImageVisual is represented as a grid of triangles\n with texture coordinates linearly mapped.\n- * 'impostor': ImageVisual is represented as a quad covering the \n- entire view, with texture coordinates determined by the \n+ * 'impostor': ImageVisual is represented as a quad covering the\n+ entire view, with texture coordinates determined by the\n transform. This produces the best transformation results, but may\n be slow.\n \n@@ -55,6 +55,9 @@\n \n def set_data(self, image=None, **kwds):\n if image is not None:\n+ image = np.array(image, copy=False)\n+ if image.dtype == np.float64:\n+ image = image.astype(np.float32)\n self._data = image\n self._texture = None\n super(ImageVisual, self).set_data(**kwds)\n", "issue": "vispy.plot.image fails on float64 textures\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\nfrom __future__ import division\n\nimport numpy as np\n\nfrom .. import gloo\nfrom .transforms import STTransform, NullTransform\nfrom .modular_mesh import ModularMesh\nfrom .components import (TextureComponent, VertexTextureCoordinateComponent,\n TextureCoordinateComponent)\n\n\nclass ImageVisual(ModularMesh):\n \"\"\"Visual subclass displaying an image.\n\n Parameters\n ----------\n data : (height, width, 4) ubyte array\n ImageVisual data.\n method : str\n Selects method of rendering image in case of non-linear transforms.\n Each method produces similar results, but may trade efficiency\n and accuracy. If the transform is linear, this parameter is ignored\n and a single quad is drawn around the area of the image.\n\n * 'subdivide': ImageVisual is represented as a grid of triangles\n with texture coordinates linearly mapped.\n * 'impostor': ImageVisual is represented as a quad covering the \n entire view, with texture coordinates determined by the \n transform. 
This produces the best transformation results, but may\n be slow.\n\n grid: tuple (rows, cols)\n If method='subdivide', this tuple determines the number of rows and\n columns in the image grid.\n \"\"\"\n def __init__(self, data, method='subdivide', grid=(10, 10), **kwargs):\n super(ImageVisual, self).__init__(**kwargs)\n\n self._data = None\n\n # maps from quad coordinates to texture coordinates\n self._tex_transform = STTransform()\n\n self._texture = None\n self._interpolation = 'nearest'\n self.set_data(data)\n self.set_gl_options(cull_face=('front_and_back',))\n\n self.method = method\n self.grid = grid\n\n def set_data(self, image=None, **kwds):\n if image is not None:\n self._data = image\n self._texture = None\n super(ImageVisual, self).set_data(**kwds)\n\n @property\n def interpolation(self):\n return self._interpolation\n\n @interpolation.setter\n def interpolation(self, interp):\n self._interpolation = interp\n self.update()\n\n @property\n def size(self):\n return self._data.shape[:2][::-1]\n\n def _build_data(self, transforms):\n # Construct complete data array with position and optionally color\n if transforms.get_full_transform().Linear:\n method = 'subdivide'\n grid = (1, 1)\n else:\n method = self.method\n grid = self.grid\n\n # TODO: subdivision and impostor modes should be handled by new\n # components?\n if method == 'subdivide':\n # quads cover area of image as closely as possible\n w = 1.0 / grid[1]\n h = 1.0 / grid[0]\n\n quad = np.array([[0, 0, 0], [w, 0, 0], [w, h, 0],\n [0, 0, 0], [w, h, 0], [0, h, 0]],\n dtype=np.float32)\n quads = np.empty((grid[1], grid[0], 6, 3), dtype=np.float32)\n quads[:] = quad\n\n mgrid = np.mgrid[0.:grid[1], 0.:grid[0]].transpose(1, 2, 0)\n mgrid = mgrid[:, :, np.newaxis, :]\n mgrid[..., 0] *= w\n mgrid[..., 1] *= h\n\n quads[..., :2] += mgrid\n tex_coords = quads.reshape(grid[1]*grid[0]*6, 3)\n vertices = tex_coords.copy()\n vertices[..., 0] *= self._data.shape[1]\n vertices[..., 1] *= self._data.shape[0]\n ModularMesh.set_data(self, pos=vertices)\n coords = np.ascontiguousarray(tex_coords[:, :2])\n tex_coord_comp = TextureCoordinateComponent(coords)\n elif method == 'impostor':\n # quad covers entire view; frag. 
shader will deal with image shape\n quad = np.array([[-1, -1, 0], [1, -1, 0], [1, 1, 0],\n [-1, -1, 0], [1, 1, 0], [-1, 1, 0]],\n dtype=np.float32)\n ModularMesh.set_data(self, pos=quad)\n\n self._tex_transform.scale = (1./self._data.shape[0],\n 1./self._data.shape[1])\n ctr = transforms.get_full_transform().inverse\n total_transform = self._tex_transform * ctr\n tex_coord_comp = VertexTextureCoordinateComponent(total_transform)\n tr = NullTransform()\n self._program.vert['map_local_to_nd'] = tr\n else:\n raise ValueError(\"Unknown image draw method '%s'\" % method)\n\n data = self._data\n self._texture = gloo.Texture2D(data)\n self._texture.interpolation = self._interpolation\n\n self.color_components = [TextureComponent(self._texture,\n tex_coord_comp)]\n\n def _activate_transform(self, transforms=None):\n # this is handled in _build_data instead.\n pass\n\n def bounds(self, mode, axis):\n if axis > 1:\n return (0, 0)\n else:\n return (0, self.size[axis])\n\n def draw(self, transforms):\n if self._data is None:\n return\n\n if transforms.get_full_transform().Linear:\n method = 'subdivide'\n else:\n method = self.method\n\n # always have to rebuild for impostor, only first for subdivide\n if self._texture is None:\n self._build_data(transforms)\n if method == 'subdivide':\n tr = transforms.get_full_transform()\n self._program.vert['map_local_to_nd'] = tr\n\n super(ImageVisual, self).draw(transforms)\n", "path": "vispy/visuals/image.py"}]}
| 2,288 | 252 |
gh_patches_debug_2921
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-15136
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
API call to add VPN tunnel fails: group field is required
### Deployment Type
Self-hosted
### NetBox Version
v3.7.2
### Python Version
3.11
### Steps to Reproduce
```
$ curl -s -i http://netbox-test.lein.io/api/vpn/tunnels/ \
-H "Authorization: Token 176d4c04ccc8f2a549ea6fd393567d9da5a796ff" \
-H "Content-type: application/json" \
-H "Accept: application/json; indent=4" \
-d '{"name":"TestTunnel", "encapsulation":"ipsec-tunnel", "status":"active"}'
```
### Expected Behavior
Tunnel "TestTunnel" is added successfully.
### Observed Behavior
```
HTTP/1.1 400 Bad Request
API-Version: 3.7
...
{
"group": [
"This field is required."
]
}
```
Adding the same tunnel in the GUI is successful (using only those three mandatory fields).
### Workaround
Create a tunnel group like "TEMP", then add `"group":1` (where 1 is the group ID) in the create call, and finally edit the resulting tunnel to remove the TEMP group.
</issue>
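For reference, the failing request is easy to reproduce from Python as well; a minimal sketch with the `requests` library, reusing the placeholder host and token from the report:

```python
import requests

url = "http://netbox-test.lein.io/api/vpn/tunnels/"
headers = {
    "Authorization": "Token 176d4c04ccc8f2a549ea6fd393567d9da5a796ff",
    "Accept": "application/json; indent=4",
}
payload = {"name": "TestTunnel", "encapsulation": "ipsec-tunnel", "status": "active"}

resp = requests.post(url, headers=headers, json=payload)
print(resp.status_code)  # 400 on the affected version, 201 once group is optional
print(resp.json())       # {'group': ['This field is required.']} before the fix
```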
<code>
[start of netbox/vpn/api/serializers.py]
1 from django.contrib.contenttypes.models import ContentType
2 from drf_spectacular.utils import extend_schema_field
3 from rest_framework import serializers
4
5 from ipam.api.nested_serializers import NestedIPAddressSerializer, NestedRouteTargetSerializer
6 from ipam.models import RouteTarget
7 from netbox.api.fields import ChoiceField, ContentTypeField, SerializedPKRelatedField
8 from netbox.api.serializers import NetBoxModelSerializer
9 from netbox.constants import NESTED_SERIALIZER_PREFIX
10 from tenancy.api.nested_serializers import NestedTenantSerializer
11 from utilities.api import get_serializer_for_model
12 from vpn.choices import *
13 from vpn.models import *
14 from .nested_serializers import *
15
16 __all__ = (
17 'IKEPolicySerializer',
18 'IKEProposalSerializer',
19 'IPSecPolicySerializer',
20 'IPSecProfileSerializer',
21 'IPSecProposalSerializer',
22 'L2VPNSerializer',
23 'L2VPNTerminationSerializer',
24 'TunnelGroupSerializer',
25 'TunnelSerializer',
26 'TunnelTerminationSerializer',
27 )
28
29
30 class TunnelGroupSerializer(NetBoxModelSerializer):
31 url = serializers.HyperlinkedIdentityField(view_name='vpn-api:tunnelgroup-detail')
32 tunnel_count = serializers.IntegerField(read_only=True)
33
34 class Meta:
35 model = TunnelGroup
36 fields = [
37 'id', 'url', 'display', 'name', 'slug', 'description', 'tags', 'custom_fields', 'created', 'last_updated',
38 'tunnel_count',
39 ]
40
41
42 class TunnelSerializer(NetBoxModelSerializer):
43 url = serializers.HyperlinkedIdentityField(
44 view_name='vpn-api:tunnel-detail'
45 )
46 status = ChoiceField(
47 choices=TunnelStatusChoices
48 )
49 group = NestedTunnelGroupSerializer()
50 encapsulation = ChoiceField(
51 choices=TunnelEncapsulationChoices
52 )
53 ipsec_profile = NestedIPSecProfileSerializer(
54 required=False,
55 allow_null=True
56 )
57 tenant = NestedTenantSerializer(
58 required=False,
59 allow_null=True
60 )
61
62 class Meta:
63 model = Tunnel
64 fields = (
65 'id', 'url', 'display', 'name', 'status', 'group', 'encapsulation', 'ipsec_profile', 'tenant', 'tunnel_id',
66 'description', 'comments', 'tags', 'custom_fields', 'created', 'last_updated',
67 )
68
69
70 class TunnelTerminationSerializer(NetBoxModelSerializer):
71 url = serializers.HyperlinkedIdentityField(
72 view_name='vpn-api:tunneltermination-detail'
73 )
74 tunnel = NestedTunnelSerializer()
75 role = ChoiceField(
76 choices=TunnelTerminationRoleChoices
77 )
78 termination_type = ContentTypeField(
79 queryset=ContentType.objects.all()
80 )
81 termination = serializers.SerializerMethodField(
82 read_only=True
83 )
84 outside_ip = NestedIPAddressSerializer(
85 required=False,
86 allow_null=True
87 )
88
89 class Meta:
90 model = TunnelTermination
91 fields = (
92 'id', 'url', 'display', 'tunnel', 'role', 'termination_type', 'termination_id', 'termination', 'outside_ip',
93 'tags', 'custom_fields', 'created', 'last_updated',
94 )
95
96 @extend_schema_field(serializers.JSONField(allow_null=True))
97 def get_termination(self, obj):
98 serializer = get_serializer_for_model(obj.termination, prefix=NESTED_SERIALIZER_PREFIX)
99 context = {'request': self.context['request']}
100 return serializer(obj.termination, context=context).data
101
102
103 class IKEProposalSerializer(NetBoxModelSerializer):
104 url = serializers.HyperlinkedIdentityField(
105 view_name='vpn-api:ikeproposal-detail'
106 )
107 authentication_method = ChoiceField(
108 choices=AuthenticationMethodChoices
109 )
110 encryption_algorithm = ChoiceField(
111 choices=EncryptionAlgorithmChoices
112 )
113 authentication_algorithm = ChoiceField(
114 choices=AuthenticationAlgorithmChoices
115 )
116 group = ChoiceField(
117 choices=DHGroupChoices
118 )
119
120 class Meta:
121 model = IKEProposal
122 fields = (
123 'id', 'url', 'display', 'name', 'description', 'authentication_method', 'encryption_algorithm',
124 'authentication_algorithm', 'group', 'sa_lifetime', 'comments', 'tags', 'custom_fields', 'created',
125 'last_updated',
126 )
127
128
129 class IKEPolicySerializer(NetBoxModelSerializer):
130 url = serializers.HyperlinkedIdentityField(
131 view_name='vpn-api:ikepolicy-detail'
132 )
133 version = ChoiceField(
134 choices=IKEVersionChoices
135 )
136 mode = ChoiceField(
137 choices=IKEModeChoices
138 )
139 proposals = SerializedPKRelatedField(
140 queryset=IKEProposal.objects.all(),
141 serializer=NestedIKEProposalSerializer,
142 required=False,
143 many=True
144 )
145
146 class Meta:
147 model = IKEPolicy
148 fields = (
149 'id', 'url', 'display', 'name', 'description', 'version', 'mode', 'proposals', 'preshared_key', 'comments',
150 'tags', 'custom_fields', 'created', 'last_updated',
151 )
152
153
154 class IPSecProposalSerializer(NetBoxModelSerializer):
155 url = serializers.HyperlinkedIdentityField(
156 view_name='vpn-api:ipsecproposal-detail'
157 )
158 encryption_algorithm = ChoiceField(
159 choices=EncryptionAlgorithmChoices
160 )
161 authentication_algorithm = ChoiceField(
162 choices=AuthenticationAlgorithmChoices
163 )
164
165 class Meta:
166 model = IPSecProposal
167 fields = (
168 'id', 'url', 'display', 'name', 'description', 'encryption_algorithm', 'authentication_algorithm',
169 'sa_lifetime_seconds', 'sa_lifetime_data', 'comments', 'tags', 'custom_fields', 'created', 'last_updated',
170 )
171
172
173 class IPSecPolicySerializer(NetBoxModelSerializer):
174 url = serializers.HyperlinkedIdentityField(
175 view_name='vpn-api:ipsecpolicy-detail'
176 )
177 proposals = SerializedPKRelatedField(
178 queryset=IPSecProposal.objects.all(),
179 serializer=NestedIPSecProposalSerializer,
180 required=False,
181 many=True
182 )
183 pfs_group = ChoiceField(
184 choices=DHGroupChoices,
185 required=False
186 )
187
188 class Meta:
189 model = IPSecPolicy
190 fields = (
191 'id', 'url', 'display', 'name', 'description', 'proposals', 'pfs_group', 'comments', 'tags',
192 'custom_fields', 'created', 'last_updated',
193 )
194
195
196 class IPSecProfileSerializer(NetBoxModelSerializer):
197 url = serializers.HyperlinkedIdentityField(
198 view_name='vpn-api:ipsecprofile-detail'
199 )
200 mode = ChoiceField(
201 choices=IPSecModeChoices
202 )
203 ike_policy = NestedIKEPolicySerializer()
204 ipsec_policy = NestedIPSecPolicySerializer()
205
206 class Meta:
207 model = IPSecProfile
208 fields = (
209 'id', 'url', 'display', 'name', 'description', 'mode', 'ike_policy', 'ipsec_policy', 'comments', 'tags',
210 'custom_fields', 'created', 'last_updated',
211 )
212
213
214 #
215 # L2VPN
216 #
217
218 class L2VPNSerializer(NetBoxModelSerializer):
219 url = serializers.HyperlinkedIdentityField(view_name='vpn-api:l2vpn-detail')
220 type = ChoiceField(choices=L2VPNTypeChoices, required=False)
221 import_targets = SerializedPKRelatedField(
222 queryset=RouteTarget.objects.all(),
223 serializer=NestedRouteTargetSerializer,
224 required=False,
225 many=True
226 )
227 export_targets = SerializedPKRelatedField(
228 queryset=RouteTarget.objects.all(),
229 serializer=NestedRouteTargetSerializer,
230 required=False,
231 many=True
232 )
233 tenant = NestedTenantSerializer(required=False, allow_null=True)
234
235 class Meta:
236 model = L2VPN
237 fields = [
238 'id', 'url', 'display', 'identifier', 'name', 'slug', 'type', 'import_targets', 'export_targets',
239 'description', 'comments', 'tenant', 'tags', 'custom_fields', 'created', 'last_updated'
240 ]
241
242
243 class L2VPNTerminationSerializer(NetBoxModelSerializer):
244 url = serializers.HyperlinkedIdentityField(view_name='vpn-api:l2vpntermination-detail')
245 l2vpn = NestedL2VPNSerializer()
246 assigned_object_type = ContentTypeField(
247 queryset=ContentType.objects.all()
248 )
249 assigned_object = serializers.SerializerMethodField(read_only=True)
250
251 class Meta:
252 model = L2VPNTermination
253 fields = [
254 'id', 'url', 'display', 'l2vpn', 'assigned_object_type', 'assigned_object_id',
255 'assigned_object', 'tags', 'custom_fields', 'created', 'last_updated'
256 ]
257
258 @extend_schema_field(serializers.JSONField(allow_null=True))
259 def get_assigned_object(self, instance):
260 serializer = get_serializer_for_model(instance.assigned_object, prefix=NESTED_SERIALIZER_PREFIX)
261 context = {'request': self.context['request']}
262 return serializer(instance.assigned_object, context=context).data
263
[end of netbox/vpn/api/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/netbox/vpn/api/serializers.py b/netbox/vpn/api/serializers.py
--- a/netbox/vpn/api/serializers.py
+++ b/netbox/vpn/api/serializers.py
@@ -46,7 +46,10 @@
status = ChoiceField(
choices=TunnelStatusChoices
)
- group = NestedTunnelGroupSerializer()
+ group = NestedTunnelGroupSerializer(
+ required=False,
+ allow_null=True
+ )
encapsulation = ChoiceField(
choices=TunnelEncapsulationChoices
)
|
{"golden_diff": "diff --git a/netbox/vpn/api/serializers.py b/netbox/vpn/api/serializers.py\n--- a/netbox/vpn/api/serializers.py\n+++ b/netbox/vpn/api/serializers.py\n@@ -46,7 +46,10 @@\n status = ChoiceField(\n choices=TunnelStatusChoices\n )\n- group = NestedTunnelGroupSerializer()\n+ group = NestedTunnelGroupSerializer(\n+ required=False,\n+ allow_null=True\n+ )\n encapsulation = ChoiceField(\n choices=TunnelEncapsulationChoices\n )\n", "issue": "API call to add VPN tunnel fails: group field is required\n### Deployment Type\r\n\r\nSelf-hosted\r\n\r\n### NetBox Version\r\n\r\nv3.7.2\r\n\r\n### Python Version\r\n\r\n3.11\r\n\r\n### Steps to Reproduce\r\n\r\n```\r\n$ curl -s -i http://netbox-test.lein.io/api/vpn/tunnels/ \\\r\n-H \"Authorization: Token 176d4c04ccc8f2a549ea6fd393567d9da5a796ff\" \\\r\n-H \"Content-type: application/json\" \\\r\n-H \"Accept: application/json; indent=4\" \\\r\n-d '{\"name\":\"TestTunnel\", \"encapsulation\":\"ipsec-tunnel\", \"status\":\"active\"}'\r\n```\r\n\r\n### Expected Behavior\r\n\r\nTunnel \"TestTunnel\" is added successfully.\r\n\r\n### Observed Behavior\r\n\r\n```\r\nHTTP/1.1 400 Bad Request\r\nAPI-Version: 3.7\r\n...\r\n{\r\n \"group\": [\r\n \"This field is required.\"\r\n ]\r\n}\r\n```\r\n\r\nAdding the same tunnel in GUI is successful (using only those three mandatory fields).\r\n\r\n### Workaround\r\n\r\nCreate a tunnel group like \"TEMP\", then add `\"group\":1` (where 1 is the group ID) in the create call, and finally edit the resulted tunnel to remove the TEMP group.\n", "before_files": [{"content": "from django.contrib.contenttypes.models import ContentType\nfrom drf_spectacular.utils import extend_schema_field\nfrom rest_framework import serializers\n\nfrom ipam.api.nested_serializers import NestedIPAddressSerializer, NestedRouteTargetSerializer\nfrom ipam.models import RouteTarget\nfrom netbox.api.fields import ChoiceField, ContentTypeField, SerializedPKRelatedField\nfrom netbox.api.serializers import NetBoxModelSerializer\nfrom netbox.constants import NESTED_SERIALIZER_PREFIX\nfrom tenancy.api.nested_serializers import NestedTenantSerializer\nfrom utilities.api import get_serializer_for_model\nfrom vpn.choices import *\nfrom vpn.models import *\nfrom .nested_serializers import *\n\n__all__ = (\n 'IKEPolicySerializer',\n 'IKEProposalSerializer',\n 'IPSecPolicySerializer',\n 'IPSecProfileSerializer',\n 'IPSecProposalSerializer',\n 'L2VPNSerializer',\n 'L2VPNTerminationSerializer',\n 'TunnelGroupSerializer',\n 'TunnelSerializer',\n 'TunnelTerminationSerializer',\n)\n\n\nclass TunnelGroupSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='vpn-api:tunnelgroup-detail')\n tunnel_count = serializers.IntegerField(read_only=True)\n\n class Meta:\n model = TunnelGroup\n fields = [\n 'id', 'url', 'display', 'name', 'slug', 'description', 'tags', 'custom_fields', 'created', 'last_updated',\n 'tunnel_count',\n ]\n\n\nclass TunnelSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:tunnel-detail'\n )\n status = ChoiceField(\n choices=TunnelStatusChoices\n )\n group = NestedTunnelGroupSerializer()\n encapsulation = ChoiceField(\n choices=TunnelEncapsulationChoices\n )\n ipsec_profile = NestedIPSecProfileSerializer(\n required=False,\n allow_null=True\n )\n tenant = NestedTenantSerializer(\n required=False,\n allow_null=True\n )\n\n class Meta:\n model = Tunnel\n fields = (\n 'id', 'url', 'display', 'name', 'status', 'group', 'encapsulation', 'ipsec_profile', 
'tenant', 'tunnel_id',\n 'description', 'comments', 'tags', 'custom_fields', 'created', 'last_updated',\n )\n\n\nclass TunnelTerminationSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:tunneltermination-detail'\n )\n tunnel = NestedTunnelSerializer()\n role = ChoiceField(\n choices=TunnelTerminationRoleChoices\n )\n termination_type = ContentTypeField(\n queryset=ContentType.objects.all()\n )\n termination = serializers.SerializerMethodField(\n read_only=True\n )\n outside_ip = NestedIPAddressSerializer(\n required=False,\n allow_null=True\n )\n\n class Meta:\n model = TunnelTermination\n fields = (\n 'id', 'url', 'display', 'tunnel', 'role', 'termination_type', 'termination_id', 'termination', 'outside_ip',\n 'tags', 'custom_fields', 'created', 'last_updated',\n )\n\n @extend_schema_field(serializers.JSONField(allow_null=True))\n def get_termination(self, obj):\n serializer = get_serializer_for_model(obj.termination, prefix=NESTED_SERIALIZER_PREFIX)\n context = {'request': self.context['request']}\n return serializer(obj.termination, context=context).data\n\n\nclass IKEProposalSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:ikeproposal-detail'\n )\n authentication_method = ChoiceField(\n choices=AuthenticationMethodChoices\n )\n encryption_algorithm = ChoiceField(\n choices=EncryptionAlgorithmChoices\n )\n authentication_algorithm = ChoiceField(\n choices=AuthenticationAlgorithmChoices\n )\n group = ChoiceField(\n choices=DHGroupChoices\n )\n\n class Meta:\n model = IKEProposal\n fields = (\n 'id', 'url', 'display', 'name', 'description', 'authentication_method', 'encryption_algorithm',\n 'authentication_algorithm', 'group', 'sa_lifetime', 'comments', 'tags', 'custom_fields', 'created',\n 'last_updated',\n )\n\n\nclass IKEPolicySerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:ikepolicy-detail'\n )\n version = ChoiceField(\n choices=IKEVersionChoices\n )\n mode = ChoiceField(\n choices=IKEModeChoices\n )\n proposals = SerializedPKRelatedField(\n queryset=IKEProposal.objects.all(),\n serializer=NestedIKEProposalSerializer,\n required=False,\n many=True\n )\n\n class Meta:\n model = IKEPolicy\n fields = (\n 'id', 'url', 'display', 'name', 'description', 'version', 'mode', 'proposals', 'preshared_key', 'comments',\n 'tags', 'custom_fields', 'created', 'last_updated',\n )\n\n\nclass IPSecProposalSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:ipsecproposal-detail'\n )\n encryption_algorithm = ChoiceField(\n choices=EncryptionAlgorithmChoices\n )\n authentication_algorithm = ChoiceField(\n choices=AuthenticationAlgorithmChoices\n )\n\n class Meta:\n model = IPSecProposal\n fields = (\n 'id', 'url', 'display', 'name', 'description', 'encryption_algorithm', 'authentication_algorithm',\n 'sa_lifetime_seconds', 'sa_lifetime_data', 'comments', 'tags', 'custom_fields', 'created', 'last_updated',\n )\n\n\nclass IPSecPolicySerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:ipsecpolicy-detail'\n )\n proposals = SerializedPKRelatedField(\n queryset=IPSecProposal.objects.all(),\n serializer=NestedIPSecProposalSerializer,\n required=False,\n many=True\n )\n pfs_group = ChoiceField(\n choices=DHGroupChoices,\n required=False\n )\n\n class Meta:\n model = IPSecPolicy\n fields = (\n 'id', 'url', 'display', 'name', 'description', 'proposals', 
'pfs_group', 'comments', 'tags',\n 'custom_fields', 'created', 'last_updated',\n )\n\n\nclass IPSecProfileSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='vpn-api:ipsecprofile-detail'\n )\n mode = ChoiceField(\n choices=IPSecModeChoices\n )\n ike_policy = NestedIKEPolicySerializer()\n ipsec_policy = NestedIPSecPolicySerializer()\n\n class Meta:\n model = IPSecProfile\n fields = (\n 'id', 'url', 'display', 'name', 'description', 'mode', 'ike_policy', 'ipsec_policy', 'comments', 'tags',\n 'custom_fields', 'created', 'last_updated',\n )\n\n\n#\n# L2VPN\n#\n\nclass L2VPNSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='vpn-api:l2vpn-detail')\n type = ChoiceField(choices=L2VPNTypeChoices, required=False)\n import_targets = SerializedPKRelatedField(\n queryset=RouteTarget.objects.all(),\n serializer=NestedRouteTargetSerializer,\n required=False,\n many=True\n )\n export_targets = SerializedPKRelatedField(\n queryset=RouteTarget.objects.all(),\n serializer=NestedRouteTargetSerializer,\n required=False,\n many=True\n )\n tenant = NestedTenantSerializer(required=False, allow_null=True)\n\n class Meta:\n model = L2VPN\n fields = [\n 'id', 'url', 'display', 'identifier', 'name', 'slug', 'type', 'import_targets', 'export_targets',\n 'description', 'comments', 'tenant', 'tags', 'custom_fields', 'created', 'last_updated'\n ]\n\n\nclass L2VPNTerminationSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='vpn-api:l2vpntermination-detail')\n l2vpn = NestedL2VPNSerializer()\n assigned_object_type = ContentTypeField(\n queryset=ContentType.objects.all()\n )\n assigned_object = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n model = L2VPNTermination\n fields = [\n 'id', 'url', 'display', 'l2vpn', 'assigned_object_type', 'assigned_object_id',\n 'assigned_object', 'tags', 'custom_fields', 'created', 'last_updated'\n ]\n\n @extend_schema_field(serializers.JSONField(allow_null=True))\n def get_assigned_object(self, instance):\n serializer = get_serializer_for_model(instance.assigned_object, prefix=NESTED_SERIALIZER_PREFIX)\n context = {'request': self.context['request']}\n return serializer(instance.assigned_object, context=context).data\n", "path": "netbox/vpn/api/serializers.py"}]}
| 3,442 | 127 |
gh_patches_debug_40661
|
rasdani/github-patches
|
git_diff
|
StackStorm__st2-4122
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dynamic Configuration values don't work with nested objects
I use the following cmd to set a key in the datastore.
`$ st2 key set -e smtp_account.me.password "I'm Encrypted"`
and here is my email pack config, /opt/stackstorm/configs/email.yaml:
```yaml
---
smtp_accounts:
- name: name
password: "{{ st2kv.system.smtp_account.me.password }}"
```
(followed by the rest of the account configuration).
This is based on the email pack's `config.schema.yaml`, which contains this:
```yaml
imap_accounts:
description: "IMAP accounts"
type: "array"
required: true
items:
type: "object"
required: true
properties:
name:
description: "Name of the account"
type: "string"
secret: false
required: true
server:
description: "Email server name - e.g. imap.gmail.com"
type: "string"
secret: false
required: true
username:
description: "Mailbox username"
type: "string"
required: true
password:
description: "Mailbox password."
type: "string"
secret: true
required: true
```
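Given that schema, the loader should resolve the `st2kv` reference and hand the action plain values, roughly like this (illustrative only, assuming the encrypted key decrypts back to the stored string):

```python
expected_config = {
    "smtp_accounts": [
        {
            "name": "name",
            "password": "I'm Encrypted",  # rendered and decrypted, not the raw Jinja string
        }
    ]
}
```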
Running the email action fails:
```
Traceback (most recent call last):
File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/runners/python_action_wrapper.py", line 278, in <module>
obj.run()
File "/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/runners/python_action_wrapper.py", line 171, in run
output = action.run(**self._parameters)
File "/opt/stackstorm/packs/email/actions/send_email.py", line 49, in run
s.login(account_data['username'], account_data['password'])
File "/usr/lib64/python2.7/smtplib.py", line 621, in login
raise SMTPAuthenticationError(code, resp)
smtplib.SMTPAuthenticationError: (460, 'ERR.LOGIN.PASSERR')
```
I added some logging to see the password value at "/opt/stackstorm/packs/email/actions/send_email.py", line 49.
It seems that we get
`{{ st2kv.system.smtp_account.me.password }}` as the password.
FYI:
If I run the send email action and use `{{ st2kv.system.smtp_account.me.password }}` as the message,
I can see that the message's value is correct, so the datastore value itself renders fine outside the pack config.
</issue>
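The root cause is easiest to see in isolation: a resolver that only recurses into dictionaries never reaches the items of an `array`-of-`object` section, so Jinja expressions nested inside list entries are passed through verbatim. A simplified, self-contained sketch of the intended traversal (not StackStorm's actual implementation):

```python
def resolve(node, render):
    """Recursively render Jinja-style strings, descending into dicts *and* lists."""
    if isinstance(node, dict):
        return {key: resolve(value, render) for key, value in node.items()}
    if isinstance(node, list):
        return [resolve(item, render) for item in node]
    if isinstance(node, str) and node.startswith("{{"):
        return render(node)
    return node

config = {"smtp_accounts": [{"name": "name",
                             "password": "{{ st2kv.system.smtp_account.me.password }}"}]}
resolved = resolve(config, render=lambda expr: "<decrypted datastore value>")
print(resolved["smtp_accounts"][0]["password"])  # <decrypted datastore value>
```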
<code>
[start of st2common/st2common/util/config_loader.py]
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 from __future__ import absolute_import
17 import copy
18
19 import six
20
21 from oslo_config import cfg
22
23 from st2common import log as logging
24 from st2common.models.db.pack import ConfigDB
25 from st2common.persistence.pack import ConfigSchema
26 from st2common.persistence.pack import Config
27 from st2common.content import utils as content_utils
28 from st2common.util import jinja as jinja_utils
29 from st2common.util.templating import render_template_with_system_and_user_context
30 from st2common.util.config_parser import ContentPackConfigParser
31 from st2common.exceptions.db import StackStormDBObjectNotFoundError
32
33 __all__ = [
34 'ContentPackConfigLoader'
35 ]
36
37 LOG = logging.getLogger(__name__)
38
39
40 class ContentPackConfigLoader(object):
41 """
42 Class which loads and resolves all the config values and returns a dictionary of resolved values
43 which can be passed to the resource.
44
45 It loads and resolves values in the following order:
46
47 1. Static values from <pack path>/config.yaml file
48 2. Dynamic and or static values from /opt/stackstorm/configs/<pack name>.yaml file.
49
50 Values are merged from left to right which means values from "<pack name>.yaml" file have
51 precedence and override values from pack local config file.
52 """
53
54 def __init__(self, pack_name, user=None):
55 self.pack_name = pack_name
56 self.user = user or cfg.CONF.system_user.user
57
58 self.pack_path = content_utils.get_pack_base_path(pack_name=pack_name)
59 self._config_parser = ContentPackConfigParser(pack_name=pack_name)
60
61 def get_config(self):
62 result = {}
63
64 # Retrieve corresponding ConfigDB and ConfigSchemaDB object
65 # Note: ConfigSchemaDB is optional right now. If it doesn't exist, we assume every value
66 # is of a type string
67 try:
68 config_db = Config.get_by_pack(value=self.pack_name)
69 except StackStormDBObjectNotFoundError:
70 # Corresponding pack config doesn't exist. We set config_db to an empty config so
71 # that the default values from config schema are still correctly applied even if
72 # pack doesn't contain a config.
73 config_db = ConfigDB(pack=self.pack_name, values={})
74
75 try:
76 config_schema_db = ConfigSchema.get_by_pack(value=self.pack_name)
77 except StackStormDBObjectNotFoundError:
78 config_schema_db = None
79
80 # 2. Retrieve values from "global" pack config file (if available) and resolve them if
81 # necessary
82 config = self._get_values_for_config(config_schema_db=config_schema_db,
83 config_db=config_db)
84 result.update(config)
85
86 return result
87
88 def _get_values_for_config(self, config_schema_db, config_db):
89 schema_values = getattr(config_schema_db, 'attributes', {})
90 config_values = getattr(config_db, 'values', {})
91
92 config = copy.deepcopy(config_values)
93
94 # Assign dynamic config values based on the values in the datastore
95 config = self._assign_dynamic_config_values(schema=schema_values, config=config)
96
97 # If config_schema is available we do a second pass and set default values for required
98 # items which values are not provided / available in the config itself
99 config = self._assign_default_values(schema=schema_values, config=config)
100 return config
101
102 def _assign_dynamic_config_values(self, schema, config, parent_keys=None):
103 """
104         Assign dynamic config value for a particular config item if the item utilizes a Jinja
105 expression for dynamic config values.
106
107 Note: This method mutates config argument in place.
108
109 :rtype: ``dict``
110 """
111 parent_keys = parent_keys or []
112
113 for config_item_key, config_item_value in six.iteritems(config):
114 schema_item = schema.get(config_item_key, {})
115 is_dictionary = isinstance(config_item_value, dict)
116
117 # Inspect nested object properties
118 if is_dictionary:
119 parent_keys += [config_item_key]
120 self._assign_dynamic_config_values(schema=schema_item.get('properties', {}),
121 config=config[config_item_key],
122 parent_keys=parent_keys)
123 else:
124 is_jinja_expression = jinja_utils.is_jinja_expression(value=config_item_value)
125
126 if is_jinja_expression:
127 # Resolve / render the Jinja template expression
128 full_config_item_key = '.'.join(parent_keys + [config_item_key])
129 value = self._get_datastore_value_for_expression(key=full_config_item_key,
130 value=config_item_value,
131 config_schema_item=schema_item)
132
133 config[config_item_key] = value
134 else:
135 # Static value, no resolution needed
136 config[config_item_key] = config_item_value
137
138 return config
139
140 def _assign_default_values(self, schema, config):
141 """
142 Assign default values for particular config if default values are provided in the config
143 schema and a value is not specified in the config.
144
145 Note: This method mutates config argument in place.
146
147 :rtype: ``dict``
148 """
149 for schema_item_key, schema_item in six.iteritems(schema):
150 has_default_value = 'default' in schema_item
151 has_config_value = schema_item_key in config
152
153 default_value = schema_item.get('default', None)
154 is_object = schema_item.get('type', None) == 'object'
155 has_properties = schema_item.get('properties', None)
156
157 if has_default_value and not has_config_value:
158 # Config value is not provided, but default value is, use a default value
159 config[schema_item_key] = default_value
160
161 # Inspect nested object properties
162 if is_object and has_properties:
163 if not config.get(schema_item_key, None):
164 config[schema_item_key] = {}
165
166 self._assign_default_values(schema=schema_item['properties'],
167 config=config[schema_item_key])
168
169 return config
170
171 def _get_datastore_value_for_expression(self, key, value, config_schema_item=None):
172 """
173 Retrieve datastore value by first resolving the datastore expression and then retrieving
174 the value from the datastore.
175
176 :param key: Full path to the config item key (e.g. "token" / "auth.settings.token", etc.)
177 """
178 from st2common.services.config import deserialize_key_value
179
180 config_schema_item = config_schema_item or {}
181 secret = config_schema_item.get('secret', False)
182
183 try:
184 value = render_template_with_system_and_user_context(value=value,
185 user=self.user)
186 except Exception as e:
187 # Throw a more user-friendly exception on failed render
188 exc_class = type(e)
189 original_msg = str(e)
190 msg = ('Failed to render dynamic configuration value for key "%s" with value '
191 '"%s" for pack "%s" config: %s ' % (key, value, self.pack_name, original_msg))
192 raise exc_class(msg)
193
194 if value:
195 # Deserialize the value
196 value = deserialize_key_value(value=value, secret=secret)
197 else:
198 value = None
199
200 return value
201
202
203 def get_config(pack, user):
204 """Returns config for given pack and user.
205 """
206 LOG.debug('Attempting to get config')
207 if pack and user:
208 LOG.debug('Pack and user found. Loading config.')
209 config_loader = ContentPackConfigLoader(
210 pack_name=pack,
211 user=user
212 )
213
214 config = config_loader.get_config()
215
216 else:
217 config = {}
218
219 LOG.debug('Config: %s', config)
220
221 return config
222
[end of st2common/st2common/util/config_loader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/st2common/st2common/util/config_loader.py b/st2common/st2common/util/config_loader.py
--- a/st2common/st2common/util/config_loader.py
+++ b/st2common/st2common/util/config_loader.py
@@ -110,22 +110,42 @@
"""
parent_keys = parent_keys or []
- for config_item_key, config_item_value in six.iteritems(config):
- schema_item = schema.get(config_item_key, {})
+ config_is_dict = isinstance(config, dict)
+ config_is_list = isinstance(config, list)
+ iterator = six.iteritems(config) if config_is_dict else enumerate(config)
+
+ # config_item_key - if config_is_dict then this is the key in the dictionary
+ # if config_is_list then this is the index of them item
+ # config_item_value - the value of the key/index for the current item
+ for config_item_key, config_item_value in iterator:
+ if config_is_dict:
+ # different schema for each key/value pair
+ schema_item = schema.get(config_item_key, {})
+ if config_is_list:
+ # same schema is shared between every item in the list
+ schema_item = schema
+
is_dictionary = isinstance(config_item_value, dict)
+ is_list = isinstance(config_item_value, list)
# Inspect nested object properties
if is_dictionary:
- parent_keys += [config_item_key]
+ parent_keys += [str(config_item_key)]
self._assign_dynamic_config_values(schema=schema_item.get('properties', {}),
config=config[config_item_key],
parent_keys=parent_keys)
+ # Inspect nested list items
+ elif is_list:
+ parent_keys += [str(config_item_key)]
+ self._assign_dynamic_config_values(schema=schema_item.get('items', {}),
+ config=config[config_item_key],
+ parent_keys=parent_keys)
else:
is_jinja_expression = jinja_utils.is_jinja_expression(value=config_item_value)
if is_jinja_expression:
# Resolve / render the Jinja template expression
- full_config_item_key = '.'.join(parent_keys + [config_item_key])
+ full_config_item_key = '.'.join(parent_keys + [str(config_item_key)])
value = self._get_datastore_value_for_expression(key=full_config_item_key,
value=config_item_value,
config_schema_item=schema_item)
@@ -188,8 +208,9 @@
exc_class = type(e)
original_msg = str(e)
msg = ('Failed to render dynamic configuration value for key "%s" with value '
- '"%s" for pack "%s" config: %s ' % (key, value, self.pack_name, original_msg))
- raise exc_class(msg)
+ '"%s" for pack "%s" config: %s %s ' % (key, value, self.pack_name,
+ exc_class, original_msg))
+ raise RuntimeError(msg)
if value:
# Deserialize the value
|
{"golden_diff": "diff --git a/st2common/st2common/util/config_loader.py b/st2common/st2common/util/config_loader.py\n--- a/st2common/st2common/util/config_loader.py\n+++ b/st2common/st2common/util/config_loader.py\n@@ -110,22 +110,42 @@\n \"\"\"\n parent_keys = parent_keys or []\n \n- for config_item_key, config_item_value in six.iteritems(config):\n- schema_item = schema.get(config_item_key, {})\n+ config_is_dict = isinstance(config, dict)\n+ config_is_list = isinstance(config, list)\n+ iterator = six.iteritems(config) if config_is_dict else enumerate(config)\n+\n+ # config_item_key - if config_is_dict then this is the key in the dictionary\n+ # if config_is_list then this is the index of them item\n+ # config_item_value - the value of the key/index for the current item\n+ for config_item_key, config_item_value in iterator:\n+ if config_is_dict:\n+ # different schema for each key/value pair\n+ schema_item = schema.get(config_item_key, {})\n+ if config_is_list:\n+ # same schema is shared between every item in the list\n+ schema_item = schema\n+\n is_dictionary = isinstance(config_item_value, dict)\n+ is_list = isinstance(config_item_value, list)\n \n # Inspect nested object properties\n if is_dictionary:\n- parent_keys += [config_item_key]\n+ parent_keys += [str(config_item_key)]\n self._assign_dynamic_config_values(schema=schema_item.get('properties', {}),\n config=config[config_item_key],\n parent_keys=parent_keys)\n+ # Inspect nested list items\n+ elif is_list:\n+ parent_keys += [str(config_item_key)]\n+ self._assign_dynamic_config_values(schema=schema_item.get('items', {}),\n+ config=config[config_item_key],\n+ parent_keys=parent_keys)\n else:\n is_jinja_expression = jinja_utils.is_jinja_expression(value=config_item_value)\n \n if is_jinja_expression:\n # Resolve / render the Jinja template expression\n- full_config_item_key = '.'.join(parent_keys + [config_item_key])\n+ full_config_item_key = '.'.join(parent_keys + [str(config_item_key)])\n value = self._get_datastore_value_for_expression(key=full_config_item_key,\n value=config_item_value,\n config_schema_item=schema_item)\n@@ -188,8 +208,9 @@\n exc_class = type(e)\n original_msg = str(e)\n msg = ('Failed to render dynamic configuration value for key \"%s\" with value '\n- '\"%s\" for pack \"%s\" config: %s ' % (key, value, self.pack_name, original_msg))\n- raise exc_class(msg)\n+ '\"%s\" for pack \"%s\" config: %s %s ' % (key, value, self.pack_name,\n+ exc_class, original_msg))\n+ raise RuntimeError(msg)\n \n if value:\n # Deserialize the value\n", "issue": "Dynamic Configuration values don't work with nested objects\nI use the following cmd to set a key in the datastore.\r\n`$ st2 key set -e smtp_account.me.password \"I'm Encrypted\"`\r\n\r\nand here are my email pack config /opt/stackstorm/configs/email.yaml\r\n\r\n```yaml\r\n---\r\nsmtp_accounts:\r\n - name: name\r\n password: \"{{ st2kv.system.smtp_account.me.password }}\"\r\n```\r\n(followed by the rest of the account configuration).\r\n\r\nThis is based on the email pack's `config.schema.yaml`, which contains this:\r\n\r\n```yaml\r\nimap_accounts:\r\n description: \"IMAP accounts\"\r\n type: \"array\"\r\n required: true\r\n items:\r\n type: \"object\"\r\n required: true\r\n properties:\r\n name:\r\n description: \"Name of the account\"\r\n type: \"string\"\r\n secret: false\r\n required: true\r\n server:\r\n description: \"Email server name - e.g. 
imap.gmail.com\"\r\n type: \"string\"\r\n secret: false\r\n required: true\r\n username:\r\n description: \"Mailbox username\"\r\n type: \"string\"\r\n required: true\r\n password:\r\n description: \"Mailbox password.\"\r\n type: \"string\"\r\n secret: true\r\n required: true\r\n```\r\n\r\nRunning the email action fails:\r\n```\r\nTraceback (most recent call last):\r\n File \"/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/runners/python_action_wrapper.py\", line 278, in <module>\r\n obj.run()\r\n File \"/opt/stackstorm/st2/lib/python2.7/site-packages/st2common/runners/python_action_wrapper.py\", line 171, in run\r\n output = action.run(**self._parameters)\r\n File \"/opt/stackstorm/packs/email/actions/send_email.py\", line 49, in run\r\n s.login(account_data['username'], account_data['password'])\r\n File \"/usr/lib64/python2.7/smtplib.py\", line 621, in login\r\n raise SMTPAuthenticationError(code, resp)\r\nsmtplib.SMTPAuthenticationError: (460, 'ERR.LOGIN.PASSERR')\r\n```\r\n\r\nI added some log to see the password value in File \"/opt/stackstorm/packs/email/actions/send_email.py\", line 49.\r\nIt seems that we get\r\n`{{ st2kv.system.smtp_account.me.password }}` as the password.\r\n\r\nFYI\r\nIf I run the send email action and use `{{ st2kv.system.smtp_account.me.password }}` as the message\r\nI can see the message's value is correct.\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport copy\n\nimport six\n\nfrom oslo_config import cfg\n\nfrom st2common import log as logging\nfrom st2common.models.db.pack import ConfigDB\nfrom st2common.persistence.pack import ConfigSchema\nfrom st2common.persistence.pack import Config\nfrom st2common.content import utils as content_utils\nfrom st2common.util import jinja as jinja_utils\nfrom st2common.util.templating import render_template_with_system_and_user_context\nfrom st2common.util.config_parser import ContentPackConfigParser\nfrom st2common.exceptions.db import StackStormDBObjectNotFoundError\n\n__all__ = [\n 'ContentPackConfigLoader'\n]\n\nLOG = logging.getLogger(__name__)\n\n\nclass ContentPackConfigLoader(object):\n \"\"\"\n Class which loads and resolves all the config values and returns a dictionary of resolved values\n which can be passed to the resource.\n\n It loads and resolves values in the following order:\n\n 1. Static values from <pack path>/config.yaml file\n 2. 
Dynamic and or static values from /opt/stackstorm/configs/<pack name>.yaml file.\n\n Values are merged from left to right which means values from \"<pack name>.yaml\" file have\n precedence and override values from pack local config file.\n \"\"\"\n\n def __init__(self, pack_name, user=None):\n self.pack_name = pack_name\n self.user = user or cfg.CONF.system_user.user\n\n self.pack_path = content_utils.get_pack_base_path(pack_name=pack_name)\n self._config_parser = ContentPackConfigParser(pack_name=pack_name)\n\n def get_config(self):\n result = {}\n\n # Retrieve corresponding ConfigDB and ConfigSchemaDB object\n # Note: ConfigSchemaDB is optional right now. If it doesn't exist, we assume every value\n # is of a type string\n try:\n config_db = Config.get_by_pack(value=self.pack_name)\n except StackStormDBObjectNotFoundError:\n # Corresponding pack config doesn't exist. We set config_db to an empty config so\n # that the default values from config schema are still correctly applied even if\n # pack doesn't contain a config.\n config_db = ConfigDB(pack=self.pack_name, values={})\n\n try:\n config_schema_db = ConfigSchema.get_by_pack(value=self.pack_name)\n except StackStormDBObjectNotFoundError:\n config_schema_db = None\n\n # 2. Retrieve values from \"global\" pack config file (if available) and resolve them if\n # necessary\n config = self._get_values_for_config(config_schema_db=config_schema_db,\n config_db=config_db)\n result.update(config)\n\n return result\n\n def _get_values_for_config(self, config_schema_db, config_db):\n schema_values = getattr(config_schema_db, 'attributes', {})\n config_values = getattr(config_db, 'values', {})\n\n config = copy.deepcopy(config_values)\n\n # Assign dynamic config values based on the values in the datastore\n config = self._assign_dynamic_config_values(schema=schema_values, config=config)\n\n # If config_schema is available we do a second pass and set default values for required\n # items which values are not provided / available in the config itself\n config = self._assign_default_values(schema=schema_values, config=config)\n return config\n\n def _assign_dynamic_config_values(self, schema, config, parent_keys=None):\n \"\"\"\n Assign dynamic config value for a particular config item if the ite utilizes a Jinja\n expression for dynamic config values.\n\n Note: This method mutates config argument in place.\n\n :rtype: ``dict``\n \"\"\"\n parent_keys = parent_keys or []\n\n for config_item_key, config_item_value in six.iteritems(config):\n schema_item = schema.get(config_item_key, {})\n is_dictionary = isinstance(config_item_value, dict)\n\n # Inspect nested object properties\n if is_dictionary:\n parent_keys += [config_item_key]\n self._assign_dynamic_config_values(schema=schema_item.get('properties', {}),\n config=config[config_item_key],\n parent_keys=parent_keys)\n else:\n is_jinja_expression = jinja_utils.is_jinja_expression(value=config_item_value)\n\n if is_jinja_expression:\n # Resolve / render the Jinja template expression\n full_config_item_key = '.'.join(parent_keys + [config_item_key])\n value = self._get_datastore_value_for_expression(key=full_config_item_key,\n value=config_item_value,\n config_schema_item=schema_item)\n\n config[config_item_key] = value\n else:\n # Static value, no resolution needed\n config[config_item_key] = config_item_value\n\n return config\n\n def _assign_default_values(self, schema, config):\n \"\"\"\n Assign default values for particular config if default values are provided in the config\n schema and a value 
is not specified in the config.\n\n Note: This method mutates config argument in place.\n\n :rtype: ``dict``\n \"\"\"\n for schema_item_key, schema_item in six.iteritems(schema):\n has_default_value = 'default' in schema_item\n has_config_value = schema_item_key in config\n\n default_value = schema_item.get('default', None)\n is_object = schema_item.get('type', None) == 'object'\n has_properties = schema_item.get('properties', None)\n\n if has_default_value and not has_config_value:\n # Config value is not provided, but default value is, use a default value\n config[schema_item_key] = default_value\n\n # Inspect nested object properties\n if is_object and has_properties:\n if not config.get(schema_item_key, None):\n config[schema_item_key] = {}\n\n self._assign_default_values(schema=schema_item['properties'],\n config=config[schema_item_key])\n\n return config\n\n def _get_datastore_value_for_expression(self, key, value, config_schema_item=None):\n \"\"\"\n Retrieve datastore value by first resolving the datastore expression and then retrieving\n the value from the datastore.\n\n :param key: Full path to the config item key (e.g. \"token\" / \"auth.settings.token\", etc.)\n \"\"\"\n from st2common.services.config import deserialize_key_value\n\n config_schema_item = config_schema_item or {}\n secret = config_schema_item.get('secret', False)\n\n try:\n value = render_template_with_system_and_user_context(value=value,\n user=self.user)\n except Exception as e:\n # Throw a more user-friendly exception on failed render\n exc_class = type(e)\n original_msg = str(e)\n msg = ('Failed to render dynamic configuration value for key \"%s\" with value '\n '\"%s\" for pack \"%s\" config: %s ' % (key, value, self.pack_name, original_msg))\n raise exc_class(msg)\n\n if value:\n # Deserialize the value\n value = deserialize_key_value(value=value, secret=secret)\n else:\n value = None\n\n return value\n\n\ndef get_config(pack, user):\n \"\"\"Returns config for given pack and user.\n \"\"\"\n LOG.debug('Attempting to get config')\n if pack and user:\n LOG.debug('Pack and user found. Loading config.')\n config_loader = ContentPackConfigLoader(\n pack_name=pack,\n user=user\n )\n\n config = config_loader.get_config()\n\n else:\n config = {}\n\n LOG.debug('Config: %s', config)\n\n return config\n", "path": "st2common/st2common/util/config_loader.py"}]}
| 3,438 | 665 |
gh_patches_debug_6480
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-2714
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LogEmitterProvider.force_flush hangs and ultimately times out randomly
**Describe your environment**
Python 3.6.8
Opentelemetry Python 1.7.1
Opentelemetry Collector 0.38.0
**Steps to reproduce**
Note that this is a random behavior.
Regularly call LogEmitterProvider.force_flush while emitting logs.
We're using the BatchLogProcessor with schedule_delay_millis=200
**What is the expected behavior?**
No hanging
**What is the actual behavior?**
Hanging randomly
**Additional context**
</issue>
<code>
[start of opentelemetry-sdk/src/opentelemetry/sdk/_logs/export/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import abc
16 import collections
17 import enum
18 import logging
19 import os
20 import sys
21 import threading
22 from os import linesep
23 from typing import IO, Callable, Deque, List, Optional, Sequence
24
25 from opentelemetry.context import attach, detach, set_value
26 from opentelemetry.sdk._logs import LogData, LogProcessor, LogRecord
27 from opentelemetry.util._time import _time_ns
28
29 _logger = logging.getLogger(__name__)
30
31
32 class LogExportResult(enum.Enum):
33 SUCCESS = 0
34 FAILURE = 1
35
36
37 class LogExporter(abc.ABC):
38 """Interface for exporting logs.
39
40 Interface to be implemented by services that want to export logs received
41 in their own format.
42
43 To export data this MUST be registered to the :class`opentelemetry.sdk._logs.LogEmitter` using a
44 log processor.
45 """
46
47 @abc.abstractmethod
48 def export(self, batch: Sequence[LogData]):
49 """Exports a batch of logs.
50
51 Args:
52 batch: The list of `LogData` objects to be exported
53
54 Returns:
55 The result of the export
56 """
57
58 @abc.abstractmethod
59 def shutdown(self):
60 """Shuts down the exporter.
61
62 Called when the SDK is shut down.
63 """
64
65
66 class ConsoleLogExporter(LogExporter):
67 """Implementation of :class:`LogExporter` that prints log records to the
68 console.
69
70 This class can be used for diagnostic purposes. It prints the exported
71 log records to the console STDOUT.
72 """
73
74 def __init__(
75 self,
76 out: IO = sys.stdout,
77 formatter: Callable[[LogRecord], str] = lambda record: record.to_json()
78 + linesep,
79 ):
80 self.out = out
81 self.formatter = formatter
82
83 def export(self, batch: Sequence[LogData]):
84 for data in batch:
85 self.out.write(self.formatter(data.log_record))
86 self.out.flush()
87 return LogExportResult.SUCCESS
88
89 def shutdown(self):
90 pass
91
92
93 class SimpleLogProcessor(LogProcessor):
94 """This is an implementation of LogProcessor which passes
95 received logs in the export-friendly LogData representation to the
96 configured LogExporter, as soon as they are emitted.
97 """
98
99 def __init__(self, exporter: LogExporter):
100 self._exporter = exporter
101 self._shutdown = False
102
103 def emit(self, log_data: LogData):
104 if self._shutdown:
105 _logger.warning("Processor is already shutdown, ignoring call")
106 return
107 token = attach(set_value("suppress_instrumentation", True))
108 try:
109 self._exporter.export((log_data,))
110 except Exception: # pylint: disable=broad-except
111 _logger.exception("Exception while exporting logs.")
112 detach(token)
113
114 def shutdown(self):
115 self._shutdown = True
116 self._exporter.shutdown()
117
118 def force_flush(
119 self, timeout_millis: int = 30000
120 ) -> bool: # pylint: disable=no-self-use
121 return True
122
123
124 class _FlushRequest:
125 __slots__ = ["event", "num_log_records"]
126
127 def __init__(self):
128 self.event = threading.Event()
129 self.num_log_records = 0
130
131
132 class BatchLogProcessor(LogProcessor):
133 """This is an implementation of LogProcessor which creates batches of
134 received logs in the export-friendly LogData representation and
135 send to the configured LogExporter, as soon as they are emitted.
136 """
137
138 def __init__(
139 self,
140 exporter: LogExporter,
141 schedule_delay_millis: int = 5000,
142 max_export_batch_size: int = 512,
143 export_timeout_millis: int = 30000,
144 ):
145 self._exporter = exporter
146 self._schedule_delay_millis = schedule_delay_millis
147 self._max_export_batch_size = max_export_batch_size
148 self._export_timeout_millis = export_timeout_millis
149 self._queue = collections.deque() # type: Deque[LogData]
150 self._worker_thread = threading.Thread(target=self.worker, daemon=True)
151 self._condition = threading.Condition(threading.Lock())
152 self._shutdown = False
153 self._flush_request = None # type: Optional[_FlushRequest]
154 self._log_records = [
155 None
156 ] * self._max_export_batch_size # type: List[Optional[LogData]]
157 self._worker_thread.start()
158 # Only available in *nix since py37.
159 if hasattr(os, "register_at_fork"):
160 os.register_at_fork(
161 after_in_child=self._at_fork_reinit
162 ) # pylint: disable=protected-access
163
164 def _at_fork_reinit(self):
165 self._condition = threading.Condition(threading.Lock())
166 self._queue.clear()
167 self._worker_thread = threading.Thread(target=self.worker, daemon=True)
168 self._worker_thread.start()
169
170 def worker(self):
171 timeout = self._schedule_delay_millis / 1e3
172 flush_request = None # type: Optional[_FlushRequest]
173 while not self._shutdown:
174 with self._condition:
175 if self._shutdown:
176 # shutdown may have been called, avoid further processing
177 break
178 flush_request = self._get_and_unset_flush_request()
179 if (
180 len(self._queue) < self._max_export_batch_size
181 and self._flush_request is None
182 ):
183 self._condition.wait(timeout)
184
185 flush_request = self._get_and_unset_flush_request()
186 if not self._queue:
187 timeout = self._schedule_delay_millis / 1e3
188 self._notify_flush_request_finished(flush_request)
189 flush_request = None
190 continue
191 if self._shutdown:
192 break
193
194 start_ns = _time_ns()
195 self._export(flush_request)
196 end_ns = _time_ns()
197 # subtract the duration of this export call to the next timeout
198 timeout = self._schedule_delay_millis / 1e3 - (
199 (end_ns - start_ns) / 1e9
200 )
201
202 self._notify_flush_request_finished(flush_request)
203 flush_request = None
204
205 # there might have been a new flush request while export was running
206 # and before the done flag switched to true
207 with self._condition:
208 shutdown_flush_request = self._get_and_unset_flush_request()
209
210 # flush the remaining logs
211 self._drain_queue()
212 self._notify_flush_request_finished(flush_request)
213 self._notify_flush_request_finished(shutdown_flush_request)
214
215 def _export(self, flush_request: Optional[_FlushRequest] = None):
216 """Exports logs considering the given flush_request.
217
218 If flush_request is not None then logs are exported in batches
219 until the number of exported logs reached or exceeded the num of logs in
220 flush_request, otherwise exports at max max_export_batch_size logs.
221 """
222 if flush_request is None:
223 self._export_batch()
224 return
225
226 num_log_records = flush_request.num_log_records
227 while self._queue:
228 exported = self._export_batch()
229 num_log_records -= exported
230
231 if num_log_records <= 0:
232 break
233
234 def _export_batch(self) -> int:
235 """Exports at most max_export_batch_size logs and returns the number of
236 exported logs.
237 """
238 idx = 0
239 while idx < self._max_export_batch_size and self._queue:
240 record = self._queue.pop()
241 self._log_records[idx] = record
242 idx += 1
243 token = attach(set_value("suppress_instrumentation", True))
244 try:
245 self._exporter.export(self._log_records[:idx]) # type: ignore
246 except Exception: # pylint: disable=broad-except
247 _logger.exception("Exception while exporting logs.")
248 detach(token)
249
250 for index in range(idx):
251 self._log_records[index] = None
252 return idx
253
254 def _drain_queue(self):
255 """Export all elements until queue is empty.
256
257 Can only be called from the worker thread context because it invokes
258 `export` that is not thread safe.
259 """
260 while self._queue:
261 self._export_batch()
262
263 def _get_and_unset_flush_request(self) -> Optional[_FlushRequest]:
264 flush_request = self._flush_request
265 self._flush_request = None
266 if flush_request is not None:
267 flush_request.num_log_records = len(self._queue)
268 return flush_request
269
270 @staticmethod
271 def _notify_flush_request_finished(
272 flush_request: Optional[_FlushRequest] = None,
273 ):
274 if flush_request is not None:
275 flush_request.event.set()
276
277 def _get_or_create_flush_request(self) -> _FlushRequest:
278 if self._flush_request is None:
279 self._flush_request = _FlushRequest()
280 return self._flush_request
281
282 def emit(self, log_data: LogData) -> None:
283 """Adds the `LogData` to queue and notifies the waiting threads
284 when size of queue reaches max_export_batch_size.
285 """
286 if self._shutdown:
287 return
288 self._queue.appendleft(log_data)
289 if len(self._queue) >= self._max_export_batch_size:
290 with self._condition:
291 self._condition.notify()
292
293 def shutdown(self):
294 self._shutdown = True
295 with self._condition:
296 self._condition.notify_all()
297 self._worker_thread.join()
298 self._exporter.shutdown()
299
300 def force_flush(self, timeout_millis: Optional[int] = None) -> bool:
301 if timeout_millis is None:
302 timeout_millis = self._export_timeout_millis
303 if self._shutdown:
304 return True
305
306 with self._condition:
307 flush_request = self._get_or_create_flush_request()
308 self._condition.notify_all()
309
310 ret = flush_request.event.wait(timeout_millis / 1e3)
311 if not ret:
312 _logger.warning("Timeout was exceeded in force_flush().")
313 return ret
314
[end of opentelemetry-sdk/src/opentelemetry/sdk/_logs/export/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/export/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/export/__init__.py
--- a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/export/__init__.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/export/__init__.py
@@ -178,7 +178,7 @@
flush_request = self._get_and_unset_flush_request()
if (
len(self._queue) < self._max_export_batch_size
- and self._flush_request is None
+ and flush_request is None
):
self._condition.wait(timeout)
|
{"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/export/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/export/__init__.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/export/__init__.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/export/__init__.py\n@@ -178,7 +178,7 @@\n flush_request = self._get_and_unset_flush_request()\n if (\n len(self._queue) < self._max_export_batch_size\n- and self._flush_request is None\n+ and flush_request is None\n ):\n self._condition.wait(timeout)\n", "issue": "LogEmitterProvider.force_flush hangs and ultimately timeouts randomly\n**Describe your environment** \r\nPython 3.6.8\r\nOpentelemetry Python 1.7.1\r\nOpentelemetry Collector 0.38.0\r\n\r\n**Steps to reproduce**\r\nNote that this is a random behavior.\r\nRegularly call LogEmitterProvider.force_flush while emitting logs.\r\nWe're using the BatchLogProcessor with schedule_delay_millis=200\r\n\r\n**What is the expected behavior?**\r\nNo hanging\r\n\r\n**What is the actual behavior?**\r\nHanging randomly\r\n\r\n**Additional context**\r\n\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport abc\nimport collections\nimport enum\nimport logging\nimport os\nimport sys\nimport threading\nfrom os import linesep\nfrom typing import IO, Callable, Deque, List, Optional, Sequence\n\nfrom opentelemetry.context import attach, detach, set_value\nfrom opentelemetry.sdk._logs import LogData, LogProcessor, LogRecord\nfrom opentelemetry.util._time import _time_ns\n\n_logger = logging.getLogger(__name__)\n\n\nclass LogExportResult(enum.Enum):\n SUCCESS = 0\n FAILURE = 1\n\n\nclass LogExporter(abc.ABC):\n \"\"\"Interface for exporting logs.\n\n Interface to be implemented by services that want to export logs received\n in their own format.\n\n To export data this MUST be registered to the :class`opentelemetry.sdk._logs.LogEmitter` using a\n log processor.\n \"\"\"\n\n @abc.abstractmethod\n def export(self, batch: Sequence[LogData]):\n \"\"\"Exports a batch of logs.\n\n Args:\n batch: The list of `LogData` objects to be exported\n\n Returns:\n The result of the export\n \"\"\"\n\n @abc.abstractmethod\n def shutdown(self):\n \"\"\"Shuts down the exporter.\n\n Called when the SDK is shut down.\n \"\"\"\n\n\nclass ConsoleLogExporter(LogExporter):\n \"\"\"Implementation of :class:`LogExporter` that prints log records to the\n console.\n\n This class can be used for diagnostic purposes. 
It prints the exported\n log records to the console STDOUT.\n \"\"\"\n\n def __init__(\n self,\n out: IO = sys.stdout,\n formatter: Callable[[LogRecord], str] = lambda record: record.to_json()\n + linesep,\n ):\n self.out = out\n self.formatter = formatter\n\n def export(self, batch: Sequence[LogData]):\n for data in batch:\n self.out.write(self.formatter(data.log_record))\n self.out.flush()\n return LogExportResult.SUCCESS\n\n def shutdown(self):\n pass\n\n\nclass SimpleLogProcessor(LogProcessor):\n \"\"\"This is an implementation of LogProcessor which passes\n received logs in the export-friendly LogData representation to the\n configured LogExporter, as soon as they are emitted.\n \"\"\"\n\n def __init__(self, exporter: LogExporter):\n self._exporter = exporter\n self._shutdown = False\n\n def emit(self, log_data: LogData):\n if self._shutdown:\n _logger.warning(\"Processor is already shutdown, ignoring call\")\n return\n token = attach(set_value(\"suppress_instrumentation\", True))\n try:\n self._exporter.export((log_data,))\n except Exception: # pylint: disable=broad-except\n _logger.exception(\"Exception while exporting logs.\")\n detach(token)\n\n def shutdown(self):\n self._shutdown = True\n self._exporter.shutdown()\n\n def force_flush(\n self, timeout_millis: int = 30000\n ) -> bool: # pylint: disable=no-self-use\n return True\n\n\nclass _FlushRequest:\n __slots__ = [\"event\", \"num_log_records\"]\n\n def __init__(self):\n self.event = threading.Event()\n self.num_log_records = 0\n\n\nclass BatchLogProcessor(LogProcessor):\n \"\"\"This is an implementation of LogProcessor which creates batches of\n received logs in the export-friendly LogData representation and\n send to the configured LogExporter, as soon as they are emitted.\n \"\"\"\n\n def __init__(\n self,\n exporter: LogExporter,\n schedule_delay_millis: int = 5000,\n max_export_batch_size: int = 512,\n export_timeout_millis: int = 30000,\n ):\n self._exporter = exporter\n self._schedule_delay_millis = schedule_delay_millis\n self._max_export_batch_size = max_export_batch_size\n self._export_timeout_millis = export_timeout_millis\n self._queue = collections.deque() # type: Deque[LogData]\n self._worker_thread = threading.Thread(target=self.worker, daemon=True)\n self._condition = threading.Condition(threading.Lock())\n self._shutdown = False\n self._flush_request = None # type: Optional[_FlushRequest]\n self._log_records = [\n None\n ] * self._max_export_batch_size # type: List[Optional[LogData]]\n self._worker_thread.start()\n # Only available in *nix since py37.\n if hasattr(os, \"register_at_fork\"):\n os.register_at_fork(\n after_in_child=self._at_fork_reinit\n ) # pylint: disable=protected-access\n\n def _at_fork_reinit(self):\n self._condition = threading.Condition(threading.Lock())\n self._queue.clear()\n self._worker_thread = threading.Thread(target=self.worker, daemon=True)\n self._worker_thread.start()\n\n def worker(self):\n timeout = self._schedule_delay_millis / 1e3\n flush_request = None # type: Optional[_FlushRequest]\n while not self._shutdown:\n with self._condition:\n if self._shutdown:\n # shutdown may have been called, avoid further processing\n break\n flush_request = self._get_and_unset_flush_request()\n if (\n len(self._queue) < self._max_export_batch_size\n and self._flush_request is None\n ):\n self._condition.wait(timeout)\n\n flush_request = self._get_and_unset_flush_request()\n if not self._queue:\n timeout = self._schedule_delay_millis / 1e3\n 
self._notify_flush_request_finished(flush_request)\n flush_request = None\n continue\n if self._shutdown:\n break\n\n start_ns = _time_ns()\n self._export(flush_request)\n end_ns = _time_ns()\n # subtract the duration of this export call to the next timeout\n timeout = self._schedule_delay_millis / 1e3 - (\n (end_ns - start_ns) / 1e9\n )\n\n self._notify_flush_request_finished(flush_request)\n flush_request = None\n\n # there might have been a new flush request while export was running\n # and before the done flag switched to true\n with self._condition:\n shutdown_flush_request = self._get_and_unset_flush_request()\n\n # flush the remaining logs\n self._drain_queue()\n self._notify_flush_request_finished(flush_request)\n self._notify_flush_request_finished(shutdown_flush_request)\n\n def _export(self, flush_request: Optional[_FlushRequest] = None):\n \"\"\"Exports logs considering the given flush_request.\n\n If flush_request is not None then logs are exported in batches\n until the number of exported logs reached or exceeded the num of logs in\n flush_request, otherwise exports at max max_export_batch_size logs.\n \"\"\"\n if flush_request is None:\n self._export_batch()\n return\n\n num_log_records = flush_request.num_log_records\n while self._queue:\n exported = self._export_batch()\n num_log_records -= exported\n\n if num_log_records <= 0:\n break\n\n def _export_batch(self) -> int:\n \"\"\"Exports at most max_export_batch_size logs and returns the number of\n exported logs.\n \"\"\"\n idx = 0\n while idx < self._max_export_batch_size and self._queue:\n record = self._queue.pop()\n self._log_records[idx] = record\n idx += 1\n token = attach(set_value(\"suppress_instrumentation\", True))\n try:\n self._exporter.export(self._log_records[:idx]) # type: ignore\n except Exception: # pylint: disable=broad-except\n _logger.exception(\"Exception while exporting logs.\")\n detach(token)\n\n for index in range(idx):\n self._log_records[index] = None\n return idx\n\n def _drain_queue(self):\n \"\"\"Export all elements until queue is empty.\n\n Can only be called from the worker thread context because it invokes\n `export` that is not thread safe.\n \"\"\"\n while self._queue:\n self._export_batch()\n\n def _get_and_unset_flush_request(self) -> Optional[_FlushRequest]:\n flush_request = self._flush_request\n self._flush_request = None\n if flush_request is not None:\n flush_request.num_log_records = len(self._queue)\n return flush_request\n\n @staticmethod\n def _notify_flush_request_finished(\n flush_request: Optional[_FlushRequest] = None,\n ):\n if flush_request is not None:\n flush_request.event.set()\n\n def _get_or_create_flush_request(self) -> _FlushRequest:\n if self._flush_request is None:\n self._flush_request = _FlushRequest()\n return self._flush_request\n\n def emit(self, log_data: LogData) -> None:\n \"\"\"Adds the `LogData` to queue and notifies the waiting threads\n when size of queue reaches max_export_batch_size.\n \"\"\"\n if self._shutdown:\n return\n self._queue.appendleft(log_data)\n if len(self._queue) >= self._max_export_batch_size:\n with self._condition:\n self._condition.notify()\n\n def shutdown(self):\n self._shutdown = True\n with self._condition:\n self._condition.notify_all()\n self._worker_thread.join()\n self._exporter.shutdown()\n\n def force_flush(self, timeout_millis: Optional[int] = None) -> bool:\n if timeout_millis is None:\n timeout_millis = self._export_timeout_millis\n if self._shutdown:\n return True\n\n with self._condition:\n flush_request = 
self._get_or_create_flush_request()\n self._condition.notify_all()\n\n ret = flush_request.event.wait(timeout_millis / 1e3)\n if not ret:\n _logger.warning(\"Timeout was exceeded in force_flush().\")\n return ret\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_logs/export/__init__.py"}]}
| 3,811 | 146 |
gh_patches_debug_30971
|
rasdani/github-patches
|
git_diff
|
aws__aws-cli-755
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
s3 sync from local to s3 bucket re-uploads files with version 1.3.6
It is probably related to #718 but it is still happening on my OSX Mavericks with Python 2.7. I sync local files to S3 using "aws s3 sync" but some files always want to re-upload. I ran a debug and the comparator thinks that the "file does not exist at destination".
I even removed all scripts and Python packages for awscli, botocore and jmespath, and then reinstalled the latest 1.3.6, but no luck. I have used awscli for s3 sync since version 1.0 without any issues and only started to experience such issues starting with 1.3.2.
I would appreciate it if you could look into it. Let me know if additional information is needed.
</issue>
<code>
[start of awscli/customizations/s3/utils.py]
1 # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 from datetime import datetime
14 import mimetypes
15 import hashlib
16 import math
17 import os
18 import sys
19 from collections import namedtuple, deque
20 from functools import partial
21
22 from six import PY3
23 from six.moves import queue
24 from dateutil.parser import parse
25 from dateutil.tz import tzlocal
26 from botocore.compat import unquote_str
27
28 from awscli.customizations.s3.constants import MAX_PARTS
29 from awscli.customizations.s3.constants import MAX_SINGLE_UPLOAD_SIZE
30
31
32 class MD5Error(Exception):
33 """
34 Exception for md5's that do not match.
35 """
36 pass
37
38
39 class StablePriorityQueue(queue.Queue):
40 """Priority queue that maintains FIFO order for same priority items.
41
42 This class was written to handle the tasks created in
43 awscli.customizations.s3.tasks, but it's possible to use this
44 class outside of that context. In order for this to be the case,
45 the following conditions should be met:
46
47 * Objects that are queued should have a PRIORITY attribute.
48 This should be an integer value not to exceed the max_priority
49 value passed into the ``__init__``. Objects with lower
50 priority numbers are retrieved before objects with higher
51 priority numbers.
52 * A relatively small max_priority should be chosen. ``get()``
53 calls are O(max_priority).
54
55 Any object that does not have a ``PRIORITY`` attribute or whose
56 priority exceeds ``max_priority`` will be queued at the highest
57 (least important) priority available.
58
59 """
60 def __init__(self, maxsize=0, max_priority=20):
61 queue.Queue.__init__(self, maxsize=maxsize)
62 self.priorities = [deque([]) for i in range(max_priority + 1)]
63 self.default_priority = max_priority
64
65 def _qsize(self):
66 size = 0
67 for bucket in self.priorities:
68 size += len(bucket)
69 return size
70
71 def _put(self, item):
72 priority = min(getattr(item, 'PRIORITY', self.default_priority),
73 self.default_priority)
74 self.priorities[priority].append(item)
75
76 def _get(self):
77 for bucket in self.priorities:
78 if not bucket:
79 continue
80 return bucket.popleft()
81
82
83 def find_bucket_key(s3_path):
84 """
85 This is a helper function that given an s3 path such that the path is of
86 the form: bucket/key
87 It will return the bucket and the key represented by the s3 path
88 """
89 s3_components = s3_path.split('/')
90 bucket = s3_components[0]
91 s3_key = ""
92 if len(s3_components) > 1:
93 s3_key = '/'.join(s3_components[1:])
94 return bucket, s3_key
95
96
97 def split_s3_bucket_key(s3_path):
98 """Split s3 path into bucket and key prefix.
99
100 This will also handle the s3:// prefix.
101
102 :return: Tuple of ('bucketname', 'keyname')
103
104 """
105 if s3_path.startswith('s3://'):
106 s3_path = s3_path[5:]
107 return find_bucket_key(s3_path)
108
109
110 def get_file_stat(path):
111 """
112 This is a helper function that given a local path return the size of
113 the file in bytes and time of last modification.
114 """
115 stats = os.stat(path)
116 update_time = datetime.fromtimestamp(stats.st_mtime, tzlocal())
117 return stats.st_size, update_time
118
119
120 def check_etag(etag, fileobj):
121 """
122 This fucntion checks the etag and the md5 checksum to ensure no
123 data was corrupted upon transfer.
124 """
125 get_chunk = partial(fileobj.read, 1024 * 1024)
126 m = hashlib.md5()
127 for chunk in iter(get_chunk, b''):
128 m.update(chunk)
129 if '-' not in etag:
130 if etag != m.hexdigest():
131 raise MD5Error
132
133
134 def check_error(response_data):
135 """
136 A helper function that prints out the error message recieved in the
137 response_data and raises an error when there is an error.
138 """
139 if response_data:
140 if 'Errors' in response_data:
141 errors = response_data['Errors']
142 for error in errors:
143 raise Exception("Error: %s\n" % error['Message'])
144
145
146 def operate(service, cmd, kwargs):
147 """
148 A helper function that universally calls any command by taking in the
149 service, name of the command, and any additional parameters required in
150 the call.
151 """
152 operation = service.get_operation(cmd)
153 http_response, response_data = operation.call(**kwargs)
154 check_error(response_data)
155 return response_data, http_response
156
157
158 def find_chunksize(size, current_chunksize):
159 """
160 The purpose of this function is determine a chunksize so that
161 the number of parts in a multipart upload is not greater than
162 the ``MAX_PARTS``. If the ``chunksize`` is greater than
163 ``MAX_SINGLE_UPLOAD_SIZE`` it returns ``MAX_SINGLE_UPLOAD_SIZE``.
164 """
165 chunksize = current_chunksize
166 num_parts = int(math.ceil(size / float(chunksize)))
167 while num_parts > MAX_PARTS:
168 chunksize *= 2
169 num_parts = int(math.ceil(size / float(chunksize)))
170 if chunksize > MAX_SINGLE_UPLOAD_SIZE:
171 return MAX_SINGLE_UPLOAD_SIZE
172 else:
173 return chunksize
174
175
176 class MultiCounter(object):
177 """
178 This class is used as a way to keep track of how many multipart
179 operations are in progress. It also is used to track how many
180 part operations are occuring.
181 """
182 def __init__(self):
183 self.count = 0
184
185
186 def uni_print(statement):
187 """
188 This function is used to properly write unicode to stdout. It
189 ensures that the proper encoding is used if the statement is
190 not in a version type of string. The initial check is to
191 allow if ``sys.stdout`` does not use an encoding
192 """
193 encoding = getattr(sys.stdout, 'encoding', None)
194 if encoding is not None and not PY3:
195 sys.stdout.write(statement.encode(sys.stdout.encoding))
196 else:
197 try:
198 sys.stdout.write(statement)
199 except UnicodeEncodeError:
200 # Some file like objects like cStringIO will
201 # try to decode as ascii. Interestingly enough
202 # this works with a normal StringIO.
203 sys.stdout.write(statement.encode('utf-8'))
204
205
206 def guess_content_type(filename):
207 """Given a filename, guess it's content type.
208
209 If the type cannot be guessed, a value of None is returned.
210 """
211 return mimetypes.guess_type(filename)[0]
212
213
214 def relative_path(filename, start=os.path.curdir):
215 """Cross platform relative path of a filename.
216
217 If no relative path can be calculated (i.e different
218 drives on Windows), then instead of raising a ValueError,
219 the absolute path is returned.
220
221 """
222 try:
223 dirname, basename = os.path.split(filename)
224 relative_dir = os.path.relpath(dirname, start)
225 return os.path.join(relative_dir, basename)
226 except ValueError:
227 return os.path.abspath(filename)
228
229
230 class ReadFileChunk(object):
231 def __init__(self, filename, start_byte, size):
232 self._filename = filename
233 self._start_byte = start_byte
234 self._fileobj = open(self._filename, 'rb')
235 self._size = self._calculate_file_size(self._fileobj, requested_size=size,
236 start_byte=start_byte)
237 self._fileobj.seek(self._start_byte)
238 self._amount_read = 0
239
240 def _calculate_file_size(self, fileobj, requested_size, start_byte):
241 actual_file_size = os.fstat(fileobj.fileno()).st_size
242 max_chunk_size = actual_file_size - start_byte
243 return min(max_chunk_size, requested_size)
244
245 def read(self, amount=None):
246 if amount is None:
247 remaining = self._size - self._amount_read
248 data = self._fileobj.read(remaining)
249 self._amount_read += remaining
250 return data
251 else:
252 actual_amount = min(self._size - self._amount_read, amount)
253 data = self._fileobj.read(actual_amount)
254 self._amount_read += actual_amount
255 return data
256
257 def seek(self, where):
258 self._fileobj.seek(self._start_byte + where)
259 self._amount_read = where
260
261 def close(self):
262 self._fileobj.close()
263
264 def tell(self):
265 return self._amount_read
266
267 def __len__(self):
268 # __len__ is defined because requests will try to determine the length
269 # of the stream to set a content length. In the normal case
270 # of the file it will just stat the file, but we need to change that
271 # behavior. By providing a __len__, requests will use that instead
272 # of stat'ing the file.
273 return self._size
274
275 def __enter__(self):
276 return self
277
278 def __exit__(self, *args, **kwargs):
279 self._fileobj.close()
280
281 def __iter__(self):
282 # This is a workaround for http://bugs.python.org/issue17575
283 # Basically httplib will try to iterate over the contents, even
284 # if its a file like object. This wasn't noticed because we've
285 # already exhausted the stream so iterating over the file immediately
286 # steps, which is what we're simulating here.
287 return iter([])
288
289
290 def _date_parser(date_string):
291 return parse(date_string).astimezone(tzlocal())
292
293
294 class BucketLister(object):
295 """List keys in a bucket."""
296 def __init__(self, operation, endpoint, date_parser=_date_parser):
297 self._operation = operation
298 self._endpoint = endpoint
299 self._date_parser = date_parser
300
301 def list_objects(self, bucket, prefix=None):
302 kwargs = {'bucket': bucket, 'encoding_type': 'url'}
303 if prefix is not None:
304 kwargs['prefix'] = prefix
305 pages = self._operation.paginate(self._endpoint, **kwargs)
306 for response, page in pages:
307 contents = page['Contents']
308 for content in contents:
309 source_path = bucket + '/' + unquote_str(content['Key'])
310 size = content['Size']
311 last_update = self._date_parser(content['LastModified'])
312 yield source_path, size, last_update
313
314
315 IORequest = namedtuple('IORequest', ['filename', 'offset', 'data'])
316 # Used to signal that IO for the filename is finished, and that
317 # any associated resources may be cleaned up.
318 IOCloseRequest = namedtuple('IOCloseRequest', ['filename'])
319
[end of awscli/customizations/s3/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/awscli/customizations/s3/utils.py b/awscli/customizations/s3/utils.py
--- a/awscli/customizations/s3/utils.py
+++ b/awscli/customizations/s3/utils.py
@@ -302,14 +302,39 @@
kwargs = {'bucket': bucket, 'encoding_type': 'url'}
if prefix is not None:
kwargs['prefix'] = prefix
- pages = self._operation.paginate(self._endpoint, **kwargs)
- for response, page in pages:
- contents = page['Contents']
- for content in contents:
- source_path = bucket + '/' + unquote_str(content['Key'])
- size = content['Size']
- last_update = self._date_parser(content['LastModified'])
- yield source_path, size, last_update
+ # This event handler is needed because we use encoding_type url and
+ # we're paginating. The pagination token is the last Key of the
+ # Contents list. However, botocore does not know that the encoding
+ # type needs to be urldecoded.
+ with ScopedEventHandler(self._operation.session, 'after-call.s3.ListObjects',
+ self._decode_keys):
+ pages = self._operation.paginate(self._endpoint, **kwargs)
+ for response, page in pages:
+ contents = page['Contents']
+ for content in contents:
+ source_path = bucket + '/' + content['Key']
+ size = content['Size']
+ last_update = self._date_parser(content['LastModified'])
+ yield source_path, size, last_update
+
+ def _decode_keys(self, parsed, **kwargs):
+ for content in parsed['Contents']:
+ content['Key'] = unquote_str(content['Key'])
+
+
+class ScopedEventHandler(object):
+ """Register an event callback for the duration of a scope."""
+
+ def __init__(self, session, event_name, handler):
+ self._session = session
+ self._event_name = event_name
+ self._handler = handler
+
+ def __enter__(self):
+ self._session.register(self._event_name, self._handler)
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self._session.unregister(self._event_name, self._handler)
IORequest = namedtuple('IORequest', ['filename', 'offset', 'data'])
|
{"golden_diff": "diff --git a/awscli/customizations/s3/utils.py b/awscli/customizations/s3/utils.py\n--- a/awscli/customizations/s3/utils.py\n+++ b/awscli/customizations/s3/utils.py\n@@ -302,14 +302,39 @@\n kwargs = {'bucket': bucket, 'encoding_type': 'url'}\n if prefix is not None:\n kwargs['prefix'] = prefix\n- pages = self._operation.paginate(self._endpoint, **kwargs)\n- for response, page in pages:\n- contents = page['Contents']\n- for content in contents:\n- source_path = bucket + '/' + unquote_str(content['Key'])\n- size = content['Size']\n- last_update = self._date_parser(content['LastModified'])\n- yield source_path, size, last_update\n+ # This event handler is needed because we use encoding_type url and\n+ # we're paginating. The pagination token is the last Key of the\n+ # Contents list. However, botocore does not know that the encoding\n+ # type needs to be urldecoded.\n+ with ScopedEventHandler(self._operation.session, 'after-call.s3.ListObjects',\n+ self._decode_keys):\n+ pages = self._operation.paginate(self._endpoint, **kwargs)\n+ for response, page in pages:\n+ contents = page['Contents']\n+ for content in contents:\n+ source_path = bucket + '/' + content['Key']\n+ size = content['Size']\n+ last_update = self._date_parser(content['LastModified'])\n+ yield source_path, size, last_update\n+\n+ def _decode_keys(self, parsed, **kwargs):\n+ for content in parsed['Contents']:\n+ content['Key'] = unquote_str(content['Key'])\n+\n+\n+class ScopedEventHandler(object):\n+ \"\"\"Register an event callback for the duration of a scope.\"\"\"\n+\n+ def __init__(self, session, event_name, handler):\n+ self._session = session\n+ self._event_name = event_name\n+ self._handler = handler\n+\n+ def __enter__(self):\n+ self._session.register(self._event_name, self._handler)\n+\n+ def __exit__(self, exc_type, exc_value, traceback):\n+ self._session.unregister(self._event_name, self._handler)\n \n \n IORequest = namedtuple('IORequest', ['filename', 'offset', 'data'])\n", "issue": "s3 sync from local to s3 bucket re-uploads files with version 1.3.6\nIt is probably related to #718 but it is still happening on my OSX Mavericks with Python 2.7. I sync local files to S3 using \"aws s3 sync\" but some files always want to re-upload. I did a debug and comparator thinks that \"file does not exist at destination\".\n\nI even removed all scripts and python packages for awscli, botocore and jmespath, and then reinstalled latest 1.3.6 but no luck. I use awscli for s3 sync since version 1.0 without any issues and have only started to experiences such issues starting with 1.3.2.\n\nAppreciate if you could look into it. Let me know if additional information was needed.\n\n", "before_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\nfrom datetime import datetime\nimport mimetypes\nimport hashlib\nimport math\nimport os\nimport sys\nfrom collections import namedtuple, deque\nfrom functools import partial\n\nfrom six import PY3\nfrom six.moves import queue\nfrom dateutil.parser import parse\nfrom dateutil.tz import tzlocal\nfrom botocore.compat import unquote_str\n\nfrom awscli.customizations.s3.constants import MAX_PARTS\nfrom awscli.customizations.s3.constants import MAX_SINGLE_UPLOAD_SIZE\n\n\nclass MD5Error(Exception):\n \"\"\"\n Exception for md5's that do not match.\n \"\"\"\n pass\n\n\nclass StablePriorityQueue(queue.Queue):\n \"\"\"Priority queue that maintains FIFO order for same priority items.\n\n This class was written to handle the tasks created in\n awscli.customizations.s3.tasks, but it's possible to use this\n class outside of that context. In order for this to be the case,\n the following conditions should be met:\n\n * Objects that are queued should have a PRIORITY attribute.\n This should be an integer value not to exceed the max_priority\n value passed into the ``__init__``. Objects with lower\n priority numbers are retrieved before objects with higher\n priority numbers.\n * A relatively small max_priority should be chosen. ``get()``\n calls are O(max_priority).\n\n Any object that does not have a ``PRIORITY`` attribute or whose\n priority exceeds ``max_priority`` will be queued at the highest\n (least important) priority available.\n\n \"\"\"\n def __init__(self, maxsize=0, max_priority=20):\n queue.Queue.__init__(self, maxsize=maxsize)\n self.priorities = [deque([]) for i in range(max_priority + 1)]\n self.default_priority = max_priority\n\n def _qsize(self):\n size = 0\n for bucket in self.priorities:\n size += len(bucket)\n return size\n\n def _put(self, item):\n priority = min(getattr(item, 'PRIORITY', self.default_priority),\n self.default_priority)\n self.priorities[priority].append(item)\n\n def _get(self):\n for bucket in self.priorities:\n if not bucket:\n continue\n return bucket.popleft()\n\n\ndef find_bucket_key(s3_path):\n \"\"\"\n This is a helper function that given an s3 path such that the path is of\n the form: bucket/key\n It will return the bucket and the key represented by the s3 path\n \"\"\"\n s3_components = s3_path.split('/')\n bucket = s3_components[0]\n s3_key = \"\"\n if len(s3_components) > 1:\n s3_key = '/'.join(s3_components[1:])\n return bucket, s3_key\n\n\ndef split_s3_bucket_key(s3_path):\n \"\"\"Split s3 path into bucket and key prefix.\n\n This will also handle the s3:// prefix.\n\n :return: Tuple of ('bucketname', 'keyname')\n\n \"\"\"\n if s3_path.startswith('s3://'):\n s3_path = s3_path[5:]\n return find_bucket_key(s3_path)\n\n\ndef get_file_stat(path):\n \"\"\"\n This is a helper function that given a local path return the size of\n the file in bytes and time of last modification.\n \"\"\"\n stats = os.stat(path)\n update_time = datetime.fromtimestamp(stats.st_mtime, tzlocal())\n return stats.st_size, update_time\n\n\ndef check_etag(etag, fileobj):\n \"\"\"\n This fucntion checks the etag and the md5 checksum to ensure no\n data was corrupted upon transfer.\n \"\"\"\n get_chunk = partial(fileobj.read, 1024 * 1024)\n m = hashlib.md5()\n for chunk in iter(get_chunk, b''):\n m.update(chunk)\n if '-' not in etag:\n if etag != m.hexdigest():\n raise MD5Error\n\n\ndef check_error(response_data):\n \"\"\"\n A helper function that prints out the error message 
recieved in the\n response_data and raises an error when there is an error.\n \"\"\"\n if response_data:\n if 'Errors' in response_data:\n errors = response_data['Errors']\n for error in errors:\n raise Exception(\"Error: %s\\n\" % error['Message'])\n\n\ndef operate(service, cmd, kwargs):\n \"\"\"\n A helper function that universally calls any command by taking in the\n service, name of the command, and any additional parameters required in\n the call.\n \"\"\"\n operation = service.get_operation(cmd)\n http_response, response_data = operation.call(**kwargs)\n check_error(response_data)\n return response_data, http_response\n\n\ndef find_chunksize(size, current_chunksize):\n \"\"\"\n The purpose of this function is determine a chunksize so that\n the number of parts in a multipart upload is not greater than\n the ``MAX_PARTS``. If the ``chunksize`` is greater than\n ``MAX_SINGLE_UPLOAD_SIZE`` it returns ``MAX_SINGLE_UPLOAD_SIZE``.\n \"\"\"\n chunksize = current_chunksize\n num_parts = int(math.ceil(size / float(chunksize)))\n while num_parts > MAX_PARTS:\n chunksize *= 2\n num_parts = int(math.ceil(size / float(chunksize)))\n if chunksize > MAX_SINGLE_UPLOAD_SIZE:\n return MAX_SINGLE_UPLOAD_SIZE\n else:\n return chunksize\n\n\nclass MultiCounter(object):\n \"\"\"\n This class is used as a way to keep track of how many multipart\n operations are in progress. It also is used to track how many\n part operations are occuring.\n \"\"\"\n def __init__(self):\n self.count = 0\n\n\ndef uni_print(statement):\n \"\"\"\n This function is used to properly write unicode to stdout. It\n ensures that the proper encoding is used if the statement is\n not in a version type of string. The initial check is to\n allow if ``sys.stdout`` does not use an encoding\n \"\"\"\n encoding = getattr(sys.stdout, 'encoding', None)\n if encoding is not None and not PY3:\n sys.stdout.write(statement.encode(sys.stdout.encoding))\n else:\n try:\n sys.stdout.write(statement)\n except UnicodeEncodeError:\n # Some file like objects like cStringIO will\n # try to decode as ascii. 
Interestingly enough\n # this works with a normal StringIO.\n sys.stdout.write(statement.encode('utf-8'))\n\n\ndef guess_content_type(filename):\n \"\"\"Given a filename, guess it's content type.\n\n If the type cannot be guessed, a value of None is returned.\n \"\"\"\n return mimetypes.guess_type(filename)[0]\n\n\ndef relative_path(filename, start=os.path.curdir):\n \"\"\"Cross platform relative path of a filename.\n\n If no relative path can be calculated (i.e different\n drives on Windows), then instead of raising a ValueError,\n the absolute path is returned.\n\n \"\"\"\n try:\n dirname, basename = os.path.split(filename)\n relative_dir = os.path.relpath(dirname, start)\n return os.path.join(relative_dir, basename)\n except ValueError:\n return os.path.abspath(filename)\n\n\nclass ReadFileChunk(object):\n def __init__(self, filename, start_byte, size):\n self._filename = filename\n self._start_byte = start_byte\n self._fileobj = open(self._filename, 'rb')\n self._size = self._calculate_file_size(self._fileobj, requested_size=size,\n start_byte=start_byte)\n self._fileobj.seek(self._start_byte)\n self._amount_read = 0\n\n def _calculate_file_size(self, fileobj, requested_size, start_byte):\n actual_file_size = os.fstat(fileobj.fileno()).st_size\n max_chunk_size = actual_file_size - start_byte\n return min(max_chunk_size, requested_size)\n\n def read(self, amount=None):\n if amount is None:\n remaining = self._size - self._amount_read\n data = self._fileobj.read(remaining)\n self._amount_read += remaining\n return data\n else:\n actual_amount = min(self._size - self._amount_read, amount)\n data = self._fileobj.read(actual_amount)\n self._amount_read += actual_amount\n return data\n\n def seek(self, where):\n self._fileobj.seek(self._start_byte + where)\n self._amount_read = where\n\n def close(self):\n self._fileobj.close()\n\n def tell(self):\n return self._amount_read\n\n def __len__(self):\n # __len__ is defined because requests will try to determine the length\n # of the stream to set a content length. In the normal case\n # of the file it will just stat the file, but we need to change that\n # behavior. By providing a __len__, requests will use that instead\n # of stat'ing the file.\n return self._size\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args, **kwargs):\n self._fileobj.close()\n\n def __iter__(self):\n # This is a workaround for http://bugs.python.org/issue17575\n # Basically httplib will try to iterate over the contents, even\n # if its a file like object. 
This wasn't noticed because we've\n # already exhausted the stream so iterating over the file immediately\n # steps, which is what we're simulating here.\n return iter([])\n\n\ndef _date_parser(date_string):\n return parse(date_string).astimezone(tzlocal())\n\n\nclass BucketLister(object):\n \"\"\"List keys in a bucket.\"\"\"\n def __init__(self, operation, endpoint, date_parser=_date_parser):\n self._operation = operation\n self._endpoint = endpoint\n self._date_parser = date_parser\n\n def list_objects(self, bucket, prefix=None):\n kwargs = {'bucket': bucket, 'encoding_type': 'url'}\n if prefix is not None:\n kwargs['prefix'] = prefix\n pages = self._operation.paginate(self._endpoint, **kwargs)\n for response, page in pages:\n contents = page['Contents']\n for content in contents:\n source_path = bucket + '/' + unquote_str(content['Key'])\n size = content['Size']\n last_update = self._date_parser(content['LastModified'])\n yield source_path, size, last_update\n\n\nIORequest = namedtuple('IORequest', ['filename', 'offset', 'data'])\n# Used to signal that IO for the filename is finished, and that\n# any associated resources may be cleaned up.\nIOCloseRequest = namedtuple('IOCloseRequest', ['filename'])\n", "path": "awscli/customizations/s3/utils.py"}]}
| 4,038 | 529 |
gh_patches_debug_18590
|
rasdani/github-patches
|
git_diff
|
celery__celery-3616
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Request on_timeout should ignore soft time limit exception
When Request.on_timeout receives a soft timeout from billiard, it does the same as if it were receiving a hard time limit exception. This is run by the controller.
But the task may catch this exception and e.g. return (this is what soft timeouts are for).
This causes:
1. the result to be saved once as an exception by the controller (on_timeout) and another time with the result returned by the task
2. the task status to be set to failure and then to success in the same manner
3. if the task is participating in a chord, the chord result counter (at least with redis) is incremented twice (instead of once), making the chord return prematurely and eventually lose tasks…
1, 2 and 3 can of course lead to strange race conditions…
## Steps to reproduce (Illustration)
with the program in test_timeout.py:
```python
import time
import celery
app = celery.Celery('test_timeout')
app.conf.update(
result_backend="redis://localhost/0",
broker_url="amqp://celery:celery@localhost:5672/host",
)
@app.task(soft_time_limit=1)
def test():
try:
time.sleep(2)
except Exception:
return 1
@app.task()
def add(args):
print("### adding", args)
return sum(args)
@app.task()
def on_error(context, exception, traceback, **kwargs):
print("### on_error: ", exception)
if __name__ == "__main__":
result = celery.chord([test.s().set(link_error=on_error.s()), test.s().set(link_error=on_error.s())])(add.s())
result.get()
```
start a worker and the program:
```
$ celery -A test_timeout worker -l WARNING
$ python3 test_timeout.py
```
## Expected behavior
add method is called with `[1, 1]` as argument and test_timeout.py return normally
## Actual behavior
The test_timeout.py fails, with
```
celery.backends.base.ChordError: Callback error: ChordError("Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)",
```
On the worker side, the **on_error is called but the add method as well !**
```
[2017-11-29 23:07:25,538: WARNING/MainProcess] Soft time limit (1s) exceeded for test_timeout.test[15109e05-da43-449f-9081-85d839ac0ef2]
[2017-11-29 23:07:25,546: WARNING/MainProcess] ### on_error:
[2017-11-29 23:07:25,546: WARNING/MainProcess] SoftTimeLimitExceeded(True,)
[2017-11-29 23:07:25,547: WARNING/MainProcess] Soft time limit (1s) exceeded for test_timeout.test[38f3f7f2-4a89-4318-8ee9-36a987f73757]
[2017-11-29 23:07:25,553: ERROR/MainProcess] Chord callback for 'ef6d7a38-d1b4-40ad-b937-ffa84e40bb23' raised: ChordError("Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)",)
Traceback (most recent call last):
File "/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py", line 290, in on_chord_part_return
callback.delay([unpack(tup, decode) for tup in resl])
File "/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py", line 290, in <listcomp>
callback.delay([unpack(tup, decode) for tup in resl])
File "/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py", line 243, in _unpack_chord_result
raise ChordError('Dependency {0} raised {1!r}'.format(tid, retval))
celery.exceptions.ChordError: Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)
[2017-11-29 23:07:25,565: WARNING/MainProcess] ### on_error:
[2017-11-29 23:07:25,565: WARNING/MainProcess] SoftTimeLimitExceeded(True,)
[2017-11-29 23:07:27,262: WARNING/PoolWorker-2] ### adding
[2017-11-29 23:07:27,264: WARNING/PoolWorker-2] [1, 1]
```
Of course, I deliberately chose to call test.s() twice, to show that the count in the chord keeps increasing. In fact:
- the chord result is incremented twice by the soft time limit error
- the chord result is again incremented twice by the `test` task returning correctly
## Conclusion
Request.on_timeout should not process soft time limit exceptions.
Here is a quick monkey patch (the corresponding fix in celery itself is trivial):
```python
def patch_celery_request_on_timeout():
from celery.worker import request
orig = request.Request.on_timeout
def patched_on_timeout(self, soft, timeout):
if not soft:
orig(self, soft, timeout)
request.Request.on_timeout = patched_on_timeout
patch_celery_request_on_timeout()
```
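For reference, a minimal sketch of what the corresponding in-tree guard could look like (hypothetical code, not the actual celery source; only the `on_timeout(self, soft, timeout)` signature is taken from the monkey patch above):

```python
# Hypothetical sketch of the guard inside celery/worker/request.py
class Request:
    def on_timeout(self, soft, timeout):
        if soft:
            # A soft time limit only raises SoftTimeLimitExceeded inside the
            # task; the task may catch it and still return a result, so the
            # request must not be marked as failed here.
            return
        self._handle_hard_time_limit(timeout)

    def _handle_hard_time_limit(self, timeout):
        # Placeholder for the existing hard-limit handling
        # (mark the task failed, store the exception, send events, ...).
        pass
```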
## version info
software -> celery:4.1.0 (latentcall) kombu:4.0.2 py:3.4.3
billiard:3.5.0.2 py-amqp:2.1.4
platform -> system:Linux arch:64bit, ELF imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:redis://10.0.3.253/0
</issue>
<code>
[start of celery/app/registry.py]
1 # -*- coding: utf-8 -*-
2 """Registry of available tasks."""
3 from __future__ import absolute_import, unicode_literals
4 import inspect
5 from importlib import import_module
6 from celery._state import get_current_app
7 from celery.exceptions import NotRegistered
8 from celery.five import items
9
10 __all__ = ['TaskRegistry']
11
12
13 class TaskRegistry(dict):
14 """Map of registered tasks."""
15
16 NotRegistered = NotRegistered
17
18 def __missing__(self, key):
19 raise self.NotRegistered(key)
20
21 def register(self, task):
22 """Register a task in the task registry.
23
24 The task will be automatically instantiated if not already an
25 instance.
26 """
27 self[task.name] = inspect.isclass(task) and task() or task
28
29 def unregister(self, name):
30 """Unregister task by name.
31
32 Arguments:
33 name (str): name of the task to unregister, or a
34 :class:`celery.task.base.Task` with a valid `name` attribute.
35
36 Raises:
37 celery.exceptions.NotRegistered: if the task is not registered.
38 """
39 try:
40 self.pop(getattr(name, 'name', name))
41 except KeyError:
42 raise self.NotRegistered(name)
43
44 # -- these methods are irrelevant now and will be removed in 4.0
45 def regular(self):
46 return self.filter_types('regular')
47
48 def periodic(self):
49 return self.filter_types('periodic')
50
51 def filter_types(self, type):
52 return {name: task for name, task in items(self)
53 if getattr(task, 'type', 'regular') == type}
54
55
56 def _unpickle_task(name):
57 return get_current_app().tasks[name]
58
59
60 def _unpickle_task_v2(name, module=None):
61 if module:
62 import_module(module)
63 return get_current_app().tasks[name]
64
[end of celery/app/registry.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/celery/app/registry.py b/celery/app/registry.py
--- a/celery/app/registry.py
+++ b/celery/app/registry.py
@@ -4,7 +4,7 @@
import inspect
from importlib import import_module
from celery._state import get_current_app
-from celery.exceptions import NotRegistered
+from celery.exceptions import NotRegistered, InvalidTaskError
from celery.five import items
__all__ = ['TaskRegistry']
@@ -22,8 +22,10 @@
"""Register a task in the task registry.
The task will be automatically instantiated if not already an
- instance.
+ instance. Name must be configured prior to registration.
"""
+ if task.name is None:
+ raise InvalidTaskError('Task "class {0}" must specify name'.format(task.__class__.__name__))
self[task.name] = inspect.isclass(task) and task() or task
def unregister(self, name):
|
{"golden_diff": "diff --git a/celery/app/registry.py b/celery/app/registry.py\n--- a/celery/app/registry.py\n+++ b/celery/app/registry.py\n@@ -4,7 +4,7 @@\n import inspect\n from importlib import import_module\n from celery._state import get_current_app\n-from celery.exceptions import NotRegistered\n+from celery.exceptions import NotRegistered, InvalidTaskError\n from celery.five import items\n \n __all__ = ['TaskRegistry']\n@@ -22,8 +22,10 @@\n \"\"\"Register a task in the task registry.\n \n The task will be automatically instantiated if not already an\n- instance.\n+ instance. Name must be configured prior to registration.\n \"\"\"\n+ if task.name is None:\n+ raise InvalidTaskError('Task \"class {0}\" must specify name'.format(task.__class__.__name__))\n self[task.name] = inspect.isclass(task) and task() or task\n \n def unregister(self, name):\n", "issue": "Request on_timeout should ignore soft time limit exception\nWhen Request.on_timeout receive a soft timeout from billiard, it does the same as if it was receiving a hard time limit exception. This is ran by the controller.\r\n\r\nBut the task may catch this exception and eg. return (this is what soft timeout are for).\r\n\r\nThis cause:\r\n1. the result to be saved once as an exception by the controller (on_timeout) and another time with the result returned by the task\r\n2. the task status to be passed to failure and to success on the same manner\r\n3. if the task is participating to a chord, the chord result counter (at least with redis) is incremented twice (instead of once), making the chord to return prematurely and eventually loose tasks\u2026\r\n\r\n1, 2 and 3 can leads of course to strange race conditions\u2026\r\n\r\n## Steps to reproduce (Illustration)\r\n\r\nwith the program in test_timeout.py:\r\n\r\n```python\r\nimport time\r\nimport celery\r\n\r\n\r\napp = celery.Celery('test_timeout')\r\napp.conf.update(\r\n result_backend=\"redis://localhost/0\",\r\n broker_url=\"amqp://celery:celery@localhost:5672/host\",\r\n)\r\n\r\[email protected](soft_time_limit=1)\r\ndef test():\r\n try:\r\n time.sleep(2)\r\n except Exception:\r\n return 1\r\n\r\[email protected]()\r\ndef add(args):\r\n print(\"### adding\", args)\r\n return sum(args)\r\n\r\[email protected]()\r\ndef on_error(context, exception, traceback, **kwargs):\r\n print(\"### on_error:\u00a0\", exception)\r\n\r\nif __name__ == \"__main__\":\r\n result = celery.chord([test.s().set(link_error=on_error.s()), test.s().set(link_error=on_error.s())])(add.s())\r\n result.get()\r\n```\r\n\r\nstart a worker and the program:\r\n\r\n```\r\n$ celery -A test_timeout worker -l WARNING\r\n$ python3 test_timeout.py\r\n```\r\n\r\n## Expected behavior\r\n\r\nadd method is called with `[1, 1]` as argument and test_timeout.py return normally\r\n\r\n## Actual behavior\r\n\r\nThe test_timeout.py fails, with\r\n```\r\ncelery.backends.base.ChordError: Callback error: ChordError(\"Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)\",\r\n```\r\nOn the worker side, the **on_error is called but the add method as well !**\r\n\r\n```\r\n[2017-11-29 23:07:25,538: WARNING/MainProcess] Soft time limit (1s) exceeded for test_timeout.test[15109e05-da43-449f-9081-85d839ac0ef2]\r\n[2017-11-29 23:07:25,546: WARNING/MainProcess] ### on_error:\r\n[2017-11-29 23:07:25,546: WARNING/MainProcess] SoftTimeLimitExceeded(True,)\r\n[2017-11-29 23:07:25,547: WARNING/MainProcess] Soft time limit (1s) exceeded for 
test_timeout.test[38f3f7f2-4a89-4318-8ee9-36a987f73757]\r\n[2017-11-29 23:07:25,553: ERROR/MainProcess] Chord callback for 'ef6d7a38-d1b4-40ad-b937-ffa84e40bb23' raised: ChordError(\"Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)\",)\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py\", line 290, in on_chord_part_return\r\n callback.delay([unpack(tup, decode) for tup in resl])\r\n File \"/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py\", line 290, in <listcomp>\r\n callback.delay([unpack(tup, decode) for tup in resl])\r\n File \"/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py\", line 243, in _unpack_chord_result\r\n raise ChordError('Dependency {0} raised {1!r}'.format(tid, retval))\r\ncelery.exceptions.ChordError: Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)\r\n[2017-11-29 23:07:25,565: WARNING/MainProcess] ### on_error:\r\n[2017-11-29 23:07:25,565: WARNING/MainProcess] SoftTimeLimitExceeded(True,)\r\n[2017-11-29 23:07:27,262: WARNING/PoolWorker-2] ### adding\r\n[2017-11-29 23:07:27,264: WARNING/PoolWorker-2] [1, 1]\r\n```\r\n\r\nOf course, on purpose did I choose to call the test.s() twice, to show that the count in the chord continues. In fact:\r\n- the chord result is incremented twice by the error of soft time limit\r\n- the chord result is again incremented twice by the correct returning of `test` task\r\n\r\n## Conclusion\r\n\r\nRequest.on_timeout should not process soft time limit exception. \r\n\r\nhere is a quick monkey patch (correction of celery is trivial)\r\n\r\n```python\r\ndef patch_celery_request_on_timeout():\r\n from celery.worker import request\r\n orig = request.Request.on_timeout\r\n def patched_on_timeout(self, soft, timeout):\r\n if not soft:\r\n orig(self, soft, timeout)\r\n request.Request.on_timeout = patched_on_timeout\r\npatch_celery_request_on_timeout()\r\n```\r\n\r\n\r\n\r\n## version info\r\n\r\nsoftware -> celery:4.1.0 (latentcall) kombu:4.0.2 py:3.4.3\r\n billiard:3.5.0.2 py-amqp:2.1.4\r\nplatform -> system:Linux arch:64bit, ELF imp:CPython\r\nloader -> celery.loaders.app.AppLoader\r\nsettings -> transport:amqp results:redis://10.0.3.253/0\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Registry of available tasks.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nimport inspect\nfrom importlib import import_module\nfrom celery._state import get_current_app\nfrom celery.exceptions import NotRegistered\nfrom celery.five import items\n\n__all__ = ['TaskRegistry']\n\n\nclass TaskRegistry(dict):\n \"\"\"Map of registered tasks.\"\"\"\n\n NotRegistered = NotRegistered\n\n def __missing__(self, key):\n raise self.NotRegistered(key)\n\n def register(self, task):\n \"\"\"Register a task in the task registry.\n\n The task will be automatically instantiated if not already an\n instance.\n \"\"\"\n self[task.name] = inspect.isclass(task) and task() or task\n\n def unregister(self, name):\n \"\"\"Unregister task by name.\n\n Arguments:\n name (str): name of the task to unregister, or a\n :class:`celery.task.base.Task` with a valid `name` attribute.\n\n Raises:\n celery.exceptions.NotRegistered: if the task is not registered.\n \"\"\"\n try:\n self.pop(getattr(name, 'name', name))\n except KeyError:\n raise self.NotRegistered(name)\n\n # -- these methods are irrelevant now and will be removed in 4.0\n def 
regular(self):\n return self.filter_types('regular')\n\n def periodic(self):\n return self.filter_types('periodic')\n\n def filter_types(self, type):\n return {name: task for name, task in items(self)\n if getattr(task, 'type', 'regular') == type}\n\n\ndef _unpickle_task(name):\n return get_current_app().tasks[name]\n\n\ndef _unpickle_task_v2(name, module=None):\n if module:\n import_module(module)\n return get_current_app().tasks[name]\n", "path": "celery/app/registry.py"}]}
| 2,546 | 214 |
gh_patches_debug_40276
|
rasdani/github-patches
|
git_diff
|
litestar-org__litestar-3475
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: SerializationException when running modeling-and-features demo from docs
### Description
Hi,
First of all thanks for developing Litestar, it proves to be a very useful piece of software here. Unfortunately I ran into an issue.
I ran into an `msgspec_error` when requesting a page backed by SQLAlchemy models which are connected via relationships. It seems that the database is correctly queried and a list of objects is returned, but then an exception is thrown when converting the objects to JSON.
I ran into this issue in my production code, but when isolating an MCVE I noticed that the provided example in the documentation also shows the same unexpected behaviour when tested on two different machines. One crucial change to the code, however, is adding an author to the database.
Since this is quite a show-stopper for me: Thanks in advance for having a look at this!
### URL to code causing the issue
https://docs.litestar.dev/2/tutorials/repository-tutorial/01-modeling-and-features.html
### MCVE
```python
from datetime import date
from typing import TYPE_CHECKING
from uuid import UUID
from sqlalchemy import ForeignKey, select
from sqlalchemy.orm import Mapped, mapped_column, relationship
from litestar import Litestar, get
from litestar.contrib.sqlalchemy.base import UUIDAuditBase, UUIDBase
from litestar.contrib.sqlalchemy.plugins import AsyncSessionConfig, SQLAlchemyAsyncConfig, SQLAlchemyInitPlugin
if TYPE_CHECKING:
from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession
# the SQLAlchemy base includes a declarative model for you to use in your models.
# The `Base` class includes a `UUID` based primary key (`id`)
class Author(UUIDBase):
name: Mapped[str]
dob: Mapped[date]
books: Mapped[list["Book"]] = relationship(back_populates="author", lazy="selectin")
# The `AuditBase` class includes the same UUID` based primary key (`id`) and 2
# additional columns: `created_at` and `updated_at`. `created_at` is a timestamp of when the
# record created, and `updated_at` is the last time the record was modified.
class Book(UUIDAuditBase):
title: Mapped[str]
author_id: Mapped[UUID] = mapped_column(ForeignKey("author.id"))
author: Mapped[Author] = relationship(lazy="joined", innerjoin=True, viewonly=True)
session_config = AsyncSessionConfig(expire_on_commit=False)
sqlalchemy_config = SQLAlchemyAsyncConfig(
connection_string="sqlite+aiosqlite:///test.sqlite", session_config=session_config
) # Create 'async_session' dependency.
sqlalchemy_plugin = SQLAlchemyInitPlugin(config=sqlalchemy_config)
async def on_startup() -> None:
"""Initializes the database."""
async with sqlalchemy_config.get_engine().begin() as conn:
await conn.run_sync(UUIDBase.metadata.create_all)
#crucially there needs to be an author in the table for the error to appear
await conn.execute(Author.__table__.insert().values(name="F. Scott Fitzgerald"))
@get(path="/authors")
async def get_authors(db_session: "AsyncSession", db_engine: "AsyncEngine") -> list[Author]:
"""Interact with SQLAlchemy engine and session."""
return list(await db_session.scalars(select(Author)))
app = Litestar(
route_handlers=[get_authors],
on_startup=[on_startup],
plugins=[SQLAlchemyInitPlugin(config=sqlalchemy_config)],
debug=True
)
```
### Steps to reproduce
```bash
1. Go to the https://docs.litestar.dev/2/tutorials/repository-tutorial/01-modeling-and-features.html page
2. Download the code
3. Run the demo with minimal requirements installed and go to http://localhost:8000/authors
4. See the error
```
### Screenshots
_No response_
### Logs
```bash
File "/usr/local/lib/python3.12/site-packages/litestar/serialization/msgspec_hooks.py", line 143, in encode_json
raise SerializationException(str(msgspec_error)) from msgspec_error
litestar.exceptions.base_exceptions.SerializationException: Unsupported type: <class '__main__.Author'>
Traceback (most recent call last):
File "/usr/local/lib/python3.12/site-packages/litestar/serialization/msgspec_hooks.py", line 141, in encode_json
return msgspec.json.encode(value, enc_hook=serializer) if serializer else _msgspec_json_encoder.encode(value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/litestar/serialization/msgspec_hooks.py", line 88, in default_serializer
raise TypeError(f"Unsupported type: {type(value)!r}")
TypeError: Unsupported type: <class '__main__.Author'>
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.12/site-packages/litestar/middleware/exceptions/middleware.py", line 219, in __call__
await self.app(scope, receive, send)
File "/usr/local/lib/python3.12/site-packages/litestar/routes/http.py", line 82, in handle
response = await self._get_response_for_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/litestar/routes/http.py", line 134, in _get_response_for_request
return await self._call_handler_function(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/litestar/routes/http.py", line 158, in _call_handler_function
response: ASGIApp = await route_handler.to_response(app=scope["app"], data=response_data, request=request)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/litestar/handlers/http_handlers/base.py", line 557, in to_response
return await response_handler(app=app, data=data, request=request) # type: ignore[call-arg]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/litestar/handlers/http_handlers/_utils.py", line 79, in handler
return response.to_asgi_response(app=None, request=request, headers=normalize_headers(headers), cookies=cookies) # pyright: ignore
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/litestar/response/base.py", line 451, in to_asgi_response
body=self.render(self.content, media_type, get_serializer(type_encoders)),
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/litestar/response/base.py", line 392, in render
return encode_json(content, enc_hook)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/litestar/serialization/msgspec_hooks.py", line 143, in encode_json
raise SerializationException(str(msgspec_error)) from msgspec_error
litestar.exceptions.base_exceptions.SerializationException: Unsupported type: <class '__main__.Author'>
INFO: 127.0.0.1:44906 - "GET /authors HTTP/1.1" 500 Internal Server Error
```
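The traceback above suggests the returned ORM instances reach msgspec without any SQLAlchemy serialization support being active. A plausible workaround, assuming this Litestar version ships the combined `SQLAlchemyPlugin` (init plus serialization), would be to register it instead of `SQLAlchemyInitPlugin` alone; a minimal sketch reusing the handler, startup hook and config names from the MCVE above:

```python
from litestar import Litestar
from litestar.contrib.sqlalchemy.plugins import SQLAlchemyPlugin

# SQLAlchemyPlugin is assumed here to bundle the init and serialization
# plugins, so ORM instances returned from handlers can be encoded to JSON.
app = Litestar(
    route_handlers=[get_authors],
    on_startup=[on_startup],
    plugins=[SQLAlchemyPlugin(config=sqlalchemy_config)],
    debug=True,
)
```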
### Litestar Version
2.8.2
### Platform
- [X] Linux
- [X] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3464">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3464/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3464/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
</issue>
<code>
[start of docs/examples/contrib/sqlalchemy/sqlalchemy_declarative_models.py]
1 from datetime import date
2 from typing import TYPE_CHECKING
3 from uuid import UUID
4
5 from sqlalchemy import ForeignKey, select
6 from sqlalchemy.orm import Mapped, mapped_column, relationship
7
8 from litestar import Litestar, get
9 from litestar.contrib.sqlalchemy.base import UUIDAuditBase, UUIDBase
10 from litestar.contrib.sqlalchemy.plugins import AsyncSessionConfig, SQLAlchemyAsyncConfig, SQLAlchemyInitPlugin
11
12 if TYPE_CHECKING:
13 from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession
14
15
16 # the SQLAlchemy base includes a declarative model for you to use in your models.
17 # The `Base` class includes a `UUID` based primary key (`id`)
18 class Author(UUIDBase):
19 name: Mapped[str]
20 dob: Mapped[date]
21 books: Mapped[list["Book"]] = relationship(back_populates="author", lazy="selectin")
22
23
24 # The `AuditBase` class includes the same UUID` based primary key (`id`) and 2
25 # additional columns: `created_at` and `updated_at`. `created_at` is a timestamp of when the
26 # record created, and `updated_at` is the last time the record was modified.
27 class Book(UUIDAuditBase):
28 title: Mapped[str]
29 author_id: Mapped[UUID] = mapped_column(ForeignKey("author.id"))
30 author: Mapped[Author] = relationship(lazy="joined", innerjoin=True, viewonly=True)
31
32
33 session_config = AsyncSessionConfig(expire_on_commit=False)
34 sqlalchemy_config = SQLAlchemyAsyncConfig(
35 connection_string="sqlite+aiosqlite:///test.sqlite", session_config=session_config
36 ) # Create 'async_session' dependency.
37 sqlalchemy_plugin = SQLAlchemyInitPlugin(config=sqlalchemy_config)
38
39
40 async def on_startup() -> None:
41 """Initializes the database."""
42 async with sqlalchemy_config.get_engine().begin() as conn:
43 await conn.run_sync(UUIDBase.metadata.create_all)
44
45
46 @get(path="/authors")
47 async def get_authors(db_session: "AsyncSession", db_engine: "AsyncEngine") -> list[Author]:
48 """Interact with SQLAlchemy engine and session."""
49 return list(await db_session.scalars(select(Author)))
50
51
52 app = Litestar(
53 route_handlers=[get_authors],
54 on_startup=[on_startup],
55 plugins=[SQLAlchemyInitPlugin(config=sqlalchemy_config)],
56 )
57
[end of docs/examples/contrib/sqlalchemy/sqlalchemy_declarative_models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/examples/contrib/sqlalchemy/sqlalchemy_declarative_models.py b/docs/examples/contrib/sqlalchemy/sqlalchemy_declarative_models.py
--- a/docs/examples/contrib/sqlalchemy/sqlalchemy_declarative_models.py
+++ b/docs/examples/contrib/sqlalchemy/sqlalchemy_declarative_models.py
@@ -1,16 +1,17 @@
+from __future__ import annotations
+
+import uuid
from datetime import date
-from typing import TYPE_CHECKING
+from typing import List
from uuid import UUID
-from sqlalchemy import ForeignKey, select
+from sqlalchemy import ForeignKey, func, select
+from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession
from sqlalchemy.orm import Mapped, mapped_column, relationship
from litestar import Litestar, get
from litestar.contrib.sqlalchemy.base import UUIDAuditBase, UUIDBase
-from litestar.contrib.sqlalchemy.plugins import AsyncSessionConfig, SQLAlchemyAsyncConfig, SQLAlchemyInitPlugin
-
-if TYPE_CHECKING:
- from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession
+from litestar.contrib.sqlalchemy.plugins import AsyncSessionConfig, SQLAlchemyAsyncConfig, SQLAlchemyPlugin
# the SQLAlchemy base includes a declarative model for you to use in your models.
@@ -18,7 +19,7 @@
class Author(UUIDBase):
name: Mapped[str]
dob: Mapped[date]
- books: Mapped[list["Book"]] = relationship(back_populates="author", lazy="selectin")
+ books: Mapped[List[Book]] = relationship(back_populates="author", lazy="selectin")
# The `AuditBase` class includes the same UUID` based primary key (`id`) and 2
@@ -32,19 +33,24 @@
session_config = AsyncSessionConfig(expire_on_commit=False)
sqlalchemy_config = SQLAlchemyAsyncConfig(
- connection_string="sqlite+aiosqlite:///test.sqlite", session_config=session_config
+ connection_string="sqlite+aiosqlite:///test.sqlite", session_config=session_config, create_all=True
) # Create 'async_session' dependency.
-sqlalchemy_plugin = SQLAlchemyInitPlugin(config=sqlalchemy_config)
async def on_startup() -> None:
- """Initializes the database."""
- async with sqlalchemy_config.get_engine().begin() as conn:
- await conn.run_sync(UUIDBase.metadata.create_all)
+ """Adds some dummy data if no data is present."""
+ async with sqlalchemy_config.get_session() as session:
+ statement = select(func.count()).select_from(Author)
+ count = await session.execute(statement)
+ if not count.scalar():
+ author_id = uuid.uuid4()
+ session.add(Author(name="Stephen King", dob=date(1954, 9, 21), id=author_id))
+ session.add(Book(title="It", author_id=author_id))
+ await session.commit()
@get(path="/authors")
-async def get_authors(db_session: "AsyncSession", db_engine: "AsyncEngine") -> list[Author]:
+async def get_authors(db_session: AsyncSession, db_engine: AsyncEngine) -> List[Author]:
"""Interact with SQLAlchemy engine and session."""
return list(await db_session.scalars(select(Author)))
@@ -52,5 +58,6 @@
app = Litestar(
route_handlers=[get_authors],
on_startup=[on_startup],
- plugins=[SQLAlchemyInitPlugin(config=sqlalchemy_config)],
+ debug=True,
+ plugins=[SQLAlchemyPlugin(config=sqlalchemy_config)],
)
|
{"golden_diff": "diff --git a/docs/examples/contrib/sqlalchemy/sqlalchemy_declarative_models.py b/docs/examples/contrib/sqlalchemy/sqlalchemy_declarative_models.py\n--- a/docs/examples/contrib/sqlalchemy/sqlalchemy_declarative_models.py\n+++ b/docs/examples/contrib/sqlalchemy/sqlalchemy_declarative_models.py\n@@ -1,16 +1,17 @@\n+from __future__ import annotations\n+\n+import uuid\n from datetime import date\n-from typing import TYPE_CHECKING\n+from typing import List\n from uuid import UUID\n \n-from sqlalchemy import ForeignKey, select\n+from sqlalchemy import ForeignKey, func, select\n+from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession\n from sqlalchemy.orm import Mapped, mapped_column, relationship\n \n from litestar import Litestar, get\n from litestar.contrib.sqlalchemy.base import UUIDAuditBase, UUIDBase\n-from litestar.contrib.sqlalchemy.plugins import AsyncSessionConfig, SQLAlchemyAsyncConfig, SQLAlchemyInitPlugin\n-\n-if TYPE_CHECKING:\n- from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession\n+from litestar.contrib.sqlalchemy.plugins import AsyncSessionConfig, SQLAlchemyAsyncConfig, SQLAlchemyPlugin\n \n \n # the SQLAlchemy base includes a declarative model for you to use in your models.\n@@ -18,7 +19,7 @@\n class Author(UUIDBase):\n name: Mapped[str]\n dob: Mapped[date]\n- books: Mapped[list[\"Book\"]] = relationship(back_populates=\"author\", lazy=\"selectin\")\n+ books: Mapped[List[Book]] = relationship(back_populates=\"author\", lazy=\"selectin\")\n \n \n # The `AuditBase` class includes the same UUID` based primary key (`id`) and 2\n@@ -32,19 +33,24 @@\n \n session_config = AsyncSessionConfig(expire_on_commit=False)\n sqlalchemy_config = SQLAlchemyAsyncConfig(\n- connection_string=\"sqlite+aiosqlite:///test.sqlite\", session_config=session_config\n+ connection_string=\"sqlite+aiosqlite:///test.sqlite\", session_config=session_config, create_all=True\n ) # Create 'async_session' dependency.\n-sqlalchemy_plugin = SQLAlchemyInitPlugin(config=sqlalchemy_config)\n \n \n async def on_startup() -> None:\n- \"\"\"Initializes the database.\"\"\"\n- async with sqlalchemy_config.get_engine().begin() as conn:\n- await conn.run_sync(UUIDBase.metadata.create_all)\n+ \"\"\"Adds some dummy data if no data is present.\"\"\"\n+ async with sqlalchemy_config.get_session() as session:\n+ statement = select(func.count()).select_from(Author)\n+ count = await session.execute(statement)\n+ if not count.scalar():\n+ author_id = uuid.uuid4()\n+ session.add(Author(name=\"Stephen King\", dob=date(1954, 9, 21), id=author_id))\n+ session.add(Book(title=\"It\", author_id=author_id))\n+ await session.commit()\n \n \n @get(path=\"/authors\")\n-async def get_authors(db_session: \"AsyncSession\", db_engine: \"AsyncEngine\") -> list[Author]:\n+async def get_authors(db_session: AsyncSession, db_engine: AsyncEngine) -> List[Author]:\n \"\"\"Interact with SQLAlchemy engine and session.\"\"\"\n return list(await db_session.scalars(select(Author)))\n \n@@ -52,5 +58,6 @@\n app = Litestar(\n route_handlers=[get_authors],\n on_startup=[on_startup],\n- plugins=[SQLAlchemyInitPlugin(config=sqlalchemy_config)],\n+ debug=True,\n+ plugins=[SQLAlchemyPlugin(config=sqlalchemy_config)],\n )\n", "issue": "Bug: SerializationException when running modeling-and-features demo from docs\n### Description\r\n\r\nHi,\r\n\r\nFirst of all thanks for developing Litestar, it proves to be a very useful piece of software here. Unfortunately I ran into an issue. 
\r\n\r\nI ran into an `msgspec_error` when requesting a page backed by sqlalchemy models which are connected via relationships. It seems that the database is correctly queried, a list of objects are returned, but then an exception is thrown when converting the objects to JSON.\r\n\r\nI ran into this issue on my production code but when isolating an MCVE I noticed that the provided example in the documentation also shows the same unexpected behaviour on tested on two different machines. One crucial change to the code is however adding an author to the database.\r\n\r\nSince this is quite a show-stopper for me: Thanks in advance for having a look at this!\r\n\r\n### URL to code causing the issue\r\n\r\nhttps://docs.litestar.dev/2/tutorials/repository-tutorial/01-modeling-and-features.html\r\n\r\n### MCVE\r\n\r\n```python\r\nfrom datetime import date\r\nfrom typing import TYPE_CHECKING\r\nfrom uuid import UUID\r\n\r\nfrom sqlalchemy import ForeignKey, select\r\nfrom sqlalchemy.orm import Mapped, mapped_column, relationship\r\n\r\nfrom litestar import Litestar, get\r\nfrom litestar.contrib.sqlalchemy.base import UUIDAuditBase, UUIDBase\r\nfrom litestar.contrib.sqlalchemy.plugins import AsyncSessionConfig, SQLAlchemyAsyncConfig, SQLAlchemyInitPlugin\r\n\r\nif TYPE_CHECKING:\r\n from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession\r\n\r\n\r\n# the SQLAlchemy base includes a declarative model for you to use in your models.\r\n# The `Base` class includes a `UUID` based primary key (`id`)\r\nclass Author(UUIDBase):\r\n name: Mapped[str]\r\n dob: Mapped[date]\r\n books: Mapped[list[\"Book\"]] = relationship(back_populates=\"author\", lazy=\"selectin\")\r\n\r\n\r\n# The `AuditBase` class includes the same UUID` based primary key (`id`) and 2\r\n# additional columns: `created_at` and `updated_at`. `created_at` is a timestamp of when the\r\n# record created, and `updated_at` is the last time the record was modified.\r\nclass Book(UUIDAuditBase):\r\n title: Mapped[str]\r\n author_id: Mapped[UUID] = mapped_column(ForeignKey(\"author.id\"))\r\n author: Mapped[Author] = relationship(lazy=\"joined\", innerjoin=True, viewonly=True)\r\n\r\n\r\nsession_config = AsyncSessionConfig(expire_on_commit=False)\r\nsqlalchemy_config = SQLAlchemyAsyncConfig(\r\n connection_string=\"sqlite+aiosqlite:///test.sqlite\", session_config=session_config\r\n) # Create 'async_session' dependency.\r\nsqlalchemy_plugin = SQLAlchemyInitPlugin(config=sqlalchemy_config)\r\n\r\n\r\nasync def on_startup() -> None:\r\n \"\"\"Initializes the database.\"\"\"\r\n async with sqlalchemy_config.get_engine().begin() as conn:\r\n await conn.run_sync(UUIDBase.metadata.create_all)\r\n \r\n #crucially there needs to be an author in the table for the error to appear\r\n await conn.execute(Author.__table__.insert().values(name=\"F. Scott Fitzgerald\"))\r\n\r\n\r\n@get(path=\"/authors\")\r\nasync def get_authors(db_session: \"AsyncSession\", db_engine: \"AsyncEngine\") -> list[Author]:\r\n \"\"\"Interact with SQLAlchemy engine and session.\"\"\"\r\n return list(await db_session.scalars(select(Author)))\r\n\r\n\r\napp = Litestar(\r\n route_handlers=[get_authors],\r\n on_startup=[on_startup],\r\n plugins=[SQLAlchemyInitPlugin(config=sqlalchemy_config)],\r\n debug=True\r\n)\r\n```\r\n\r\n\r\n### Steps to reproduce\r\n\r\n```bash\r\n1. Go to the https://docs.litestar.dev/2/tutorials/repository-tutorial/01-modeling-and-features.html page\r\n2. Download the code\r\n3. 
Run the demo with minimal requirements installed and go to http://localhost:8000/authors\r\n4. See the error\r\n```\r\n\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### Logs\r\n\r\n```bash\r\nFile \"/usr/local/lib/python3.12/site-packages/litestar/serialization/msgspec_hooks.py\", line 143, in encode_json\r\n raise SerializationException(str(msgspec_error)) from msgspec_error\r\nlitestar.exceptions.base_exceptions.SerializationException: Unsupported type: <class '__main__.Author'>\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.12/site-packages/litestar/serialization/msgspec_hooks.py\", line 141, in encode_json\r\n return msgspec.json.encode(value, enc_hook=serializer) if serializer else _msgspec_json_encoder.encode(value)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.12/site-packages/litestar/serialization/msgspec_hooks.py\", line 88, in default_serializer\r\n raise TypeError(f\"Unsupported type: {type(value)!r}\")\r\nTypeError: Unsupported type: <class '__main__.Author'>\r\n \r\nThe above exception was the direct cause of the following exception:\r\n \r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.12/site-packages/litestar/middleware/exceptions/middleware.py\", line 219, in __call__\r\n await self.app(scope, receive, send)\r\n File \"/usr/local/lib/python3.12/site-packages/litestar/routes/http.py\", line 82, in handle\r\n response = await self._get_response_for_request(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.12/site-packages/litestar/routes/http.py\", line 134, in _get_response_for_request\r\n return await self._call_handler_function(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.12/site-packages/litestar/routes/http.py\", line 158, in _call_handler_function\r\n response: ASGIApp = await route_handler.to_response(app=scope[\"app\"], data=response_data, request=request)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.12/site-packages/litestar/handlers/http_handlers/base.py\", line 557, in to_response\r\n return await response_handler(app=app, data=data, request=request) # type: ignore[call-arg]\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.12/site-packages/litestar/handlers/http_handlers/_utils.py\", line 79, in handler\r\n return response.to_asgi_response(app=None, request=request, headers=normalize_headers(headers), cookies=cookies) # pyright: ignore\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.12/site-packages/litestar/response/base.py\", line 451, in to_asgi_response\r\n body=self.render(self.content, media_type, get_serializer(type_encoders)),\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.12/site-packages/litestar/response/base.py\", line 392, in render\r\n return encode_json(content, enc_hook)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.12/site-packages/litestar/serialization/msgspec_hooks.py\", line 143, in encode_json\r\n raise SerializationException(str(msgspec_error)) from msgspec_error\r\nlitestar.exceptions.base_exceptions.SerializationException: Unsupported type: <class '__main__.Author'>\r\nINFO: 127.0.0.1:44906 - \"GET /authors HTTP/1.1\" 500 Internal Server Error\r\n```\r\n\r\n\r\n### Litestar 
Version\r\n\r\n2.8.2\r\n\r\n### Platform\r\n\r\n- [X] Linux\r\n- [X] Mac\r\n- [ ] Windows\r\n- [ ] Other (Please specify in the description above)\r\n\r\n<!-- POLAR PLEDGE BADGE START -->\r\n---\r\n> [!NOTE] \r\n> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and \r\n> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.\r\n>\r\n> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)\r\n> * If you would like to see an issue prioritized, make a pledge towards it!\r\n> * We receive the pledge once the issue is completed & verified\r\n> * This, along with engagement in the community, helps us know which features are a priority to our users.\r\n\r\n<a href=\"https://polar.sh/litestar-org/litestar/issues/3464\">\r\n<picture>\r\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/litestar-org/litestar/issues/3464/pledge.svg?darkmode=1\">\r\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/litestar-org/litestar/issues/3464/pledge.svg\">\r\n</picture>\r\n</a>\r\n<!-- POLAR PLEDGE BADGE END -->\r\n\n", "before_files": [{"content": "from datetime import date\nfrom typing import TYPE_CHECKING\nfrom uuid import UUID\n\nfrom sqlalchemy import ForeignKey, select\nfrom sqlalchemy.orm import Mapped, mapped_column, relationship\n\nfrom litestar import Litestar, get\nfrom litestar.contrib.sqlalchemy.base import UUIDAuditBase, UUIDBase\nfrom litestar.contrib.sqlalchemy.plugins import AsyncSessionConfig, SQLAlchemyAsyncConfig, SQLAlchemyInitPlugin\n\nif TYPE_CHECKING:\n from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession\n\n\n# the SQLAlchemy base includes a declarative model for you to use in your models.\n# The `Base` class includes a `UUID` based primary key (`id`)\nclass Author(UUIDBase):\n name: Mapped[str]\n dob: Mapped[date]\n books: Mapped[list[\"Book\"]] = relationship(back_populates=\"author\", lazy=\"selectin\")\n\n\n# The `AuditBase` class includes the same UUID` based primary key (`id`) and 2\n# additional columns: `created_at` and `updated_at`. `created_at` is a timestamp of when the\n# record created, and `updated_at` is the last time the record was modified.\nclass Book(UUIDAuditBase):\n title: Mapped[str]\n author_id: Mapped[UUID] = mapped_column(ForeignKey(\"author.id\"))\n author: Mapped[Author] = relationship(lazy=\"joined\", innerjoin=True, viewonly=True)\n\n\nsession_config = AsyncSessionConfig(expire_on_commit=False)\nsqlalchemy_config = SQLAlchemyAsyncConfig(\n connection_string=\"sqlite+aiosqlite:///test.sqlite\", session_config=session_config\n) # Create 'async_session' dependency.\nsqlalchemy_plugin = SQLAlchemyInitPlugin(config=sqlalchemy_config)\n\n\nasync def on_startup() -> None:\n \"\"\"Initializes the database.\"\"\"\n async with sqlalchemy_config.get_engine().begin() as conn:\n await conn.run_sync(UUIDBase.metadata.create_all)\n\n\n@get(path=\"/authors\")\nasync def get_authors(db_session: \"AsyncSession\", db_engine: \"AsyncEngine\") -> list[Author]:\n \"\"\"Interact with SQLAlchemy engine and session.\"\"\"\n return list(await db_session.scalars(select(Author)))\n\n\napp = Litestar(\n route_handlers=[get_authors],\n on_startup=[on_startup],\n plugins=[SQLAlchemyInitPlugin(config=sqlalchemy_config)],\n)\n", "path": "docs/examples/contrib/sqlalchemy/sqlalchemy_declarative_models.py"}]}
| 3,163 | 759 |
gh_patches_debug_18326
|
rasdani/github-patches
|
git_diff
|
pypa__virtualenv-1788
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pinning pip fails via app data seeder
```
VIRTUALENV_PIP=19.2.3 virtualenv --download --seeder app-data .venv
ERROR: Invalid requirement: 'pip=19.2.3'
```
from https://github.com/pglass/tox-pip-version/issues/17#issuecomment-616246861
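The error message points at the requirement string itself: PEP 440/508 requirement specifiers use `==` for exact pins, while the string handed to pip here apparently contains a single `=`. A small illustration of the difference using the `packaging` library, which implements PEP 508 requirement parsing; the snippet is only illustrative and not part of virtualenv:

```python
from packaging.requirements import InvalidRequirement, Requirement

for spec in ("pip==19.2.3", "pip=19.2.3"):
    try:
        Requirement(spec)
        print(spec, "-> valid requirement")
    except InvalidRequirement as exc:
        print(spec, "-> invalid requirement:", exc)
```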
</issue>
<code>
[start of src/virtualenv/util/path/_pathlib/via_os_path.py]
1 from __future__ import absolute_import, unicode_literals
2
3 import os
4 import platform
5 from contextlib import contextmanager
6
7 from virtualenv.util.six import ensure_str, ensure_text
8
9 IS_PYPY = platform.python_implementation() == "PyPy"
10
11
12 class Path(object):
13 def __init__(self, path):
14 if isinstance(path, Path):
15 _path = path._path
16 else:
17 _path = ensure_text(path)
18 if IS_PYPY:
19 _path = _path.encode("utf-8")
20 self._path = _path
21
22 def __repr__(self):
23 return ensure_str("Path({})".format(ensure_text(self._path)))
24
25 def __unicode__(self):
26 return ensure_text(self._path)
27
28 def __str__(self):
29 return ensure_str(self._path)
30
31 def __div__(self, other):
32 if isinstance(other, Path):
33 right = other._path
34 else:
35 right = ensure_text(other)
36 if IS_PYPY:
37 right = right.encode("utf-8")
38 return Path(os.path.join(self._path, right))
39
40 def __truediv__(self, other):
41 return self.__div__(other)
42
43 def __eq__(self, other):
44 return self._path == (other._path if isinstance(other, Path) else None)
45
46 def __ne__(self, other):
47 return not (self == other)
48
49 def __hash__(self):
50 return hash(self._path)
51
52 def exists(self):
53 return os.path.exists(self._path)
54
55 @property
56 def parent(self):
57 return Path(os.path.abspath(os.path.join(self._path, os.path.pardir)))
58
59 def resolve(self):
60 return Path(os.path.realpath(self._path))
61
62 @property
63 def name(self):
64 return os.path.basename(self._path)
65
66 @property
67 def parts(self):
68 return self._path.split(os.sep)
69
70 def is_file(self):
71 return os.path.isfile(self._path)
72
73 def is_dir(self):
74 return os.path.isdir(self._path)
75
76 def mkdir(self, parents=True, exist_ok=True):
77 if not self.exists() and exist_ok:
78 os.makedirs(self._path)
79
80 def read_text(self, encoding="utf-8"):
81 return self.read_bytes().decode(encoding)
82
83 def read_bytes(self):
84 with open(self._path, "rb") as file_handler:
85 return file_handler.read()
86
87 def write_text(self, text, encoding="utf-8"):
88 with open(self._path, "wb") as file_handler:
89 file_handler.write(text.encode(encoding))
90
91 def iterdir(self):
92 for p in os.listdir(self._path):
93 yield Path(os.path.join(self._path, p))
94
95 @property
96 def suffix(self):
97 _, ext = os.path.splitext(self.name)
98 return ext
99
100 @property
101 def stem(self):
102 base, _ = os.path.splitext(self.name)
103 return base
104
105 @contextmanager
106 def open(self, mode="r"):
107 with open(self._path, mode) as file_handler:
108 yield file_handler
109
110 @property
111 def parents(self):
112 result = []
113 parts = self.parts
114 for i in range(len(parts)):
115 result.append(Path(os.sep.join(parts[0 : i + 1])))
116 return result
117
118 def unlink(self):
119 os.remove(self._path)
120
121 def with_name(self, name):
122 return self.parent / name
123
124 def is_symlink(self):
125 return os.path.islink(self._path)
126
127 def relative_to(self, other):
128 if not self._path.startswith(other._path):
129 raise ValueError("{} does not start with {}".format(self._path, other._path))
130 return Path(os.sep.join(self.parts[len(other.parts) :]))
131
132 def stat(self):
133 return os.stat(self._path)
134
135 def chmod(self, mode):
136 os.chmod(self._path, mode)
137
138
139 __all__ = ("Path",)
140
[end of src/virtualenv/util/path/_pathlib/via_os_path.py]
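For comparison, the stdlib `pathlib` semantics that the shim above presumably intends to mirror: `parents` excludes the path itself and is ordered from the nearest parent upward. A quick illustration using only the standard library:

```python
from pathlib import PurePosixPath

p = PurePosixPath("/a/b/c")
# parents starts at the immediate parent and walks up to the root,
# and does not include the path itself.
print(list(p.parents))  # [PurePosixPath('/a/b'), PurePosixPath('/a'), PurePosixPath('/')]
```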
[start of src/virtualenv/seed/embed/wheels/acquire.py]
1 """Bootstrap"""
2 from __future__ import absolute_import, unicode_literals
3
4 import logging
5 import os
6 import sys
7 from collections import defaultdict
8 from contextlib import contextmanager
9 from copy import copy
10 from shutil import copy2
11 from zipfile import ZipFile
12
13 from virtualenv.info import IS_ZIPAPP
14 from virtualenv.util.path import Path
15 from virtualenv.util.six import ensure_str, ensure_text
16 from virtualenv.util.subprocess import Popen, subprocess
17 from virtualenv.util.zipapp import ensure_file_on_disk
18
19 from . import BUNDLE_SUPPORT, MAX
20
21 BUNDLE_FOLDER = Path(os.path.abspath(__file__)).parent
22
23
24 def get_wheels(for_py_version, wheel_cache_dir, extra_search_dir, download, packages, app_data):
25 # not all wheels are compatible with all python versions, so we need to py version qualify it
26 processed = copy(packages)
27 # 1. acquire from bundle
28 acquire_from_bundle(processed, for_py_version, wheel_cache_dir)
29 # 2. acquire from extra search dir
30 acquire_from_dir(processed, for_py_version, wheel_cache_dir, extra_search_dir)
31 # 3. download from the internet
32 if download and processed:
33 download_wheel(processed, for_py_version, wheel_cache_dir, app_data)
34
35 # in the end just get the wheels
36 wheels = _get_wheels(wheel_cache_dir, packages)
37 return {p: next(iter(ver_to_files))[1] for p, ver_to_files in wheels.items()}
38
39
40 def acquire_from_bundle(packages, for_py_version, to_folder):
41 for pkg, version in list(packages.items()):
42 bundle = get_bundled_wheel(pkg, for_py_version)
43 if bundle is not None:
44 pkg_version = bundle.stem.split("-")[1]
45 exact_version_match = version == pkg_version
46 if exact_version_match:
47 del packages[pkg]
48 if version is None or exact_version_match:
49 bundled_wheel_file = to_folder / bundle.name
50 if not bundled_wheel_file.exists():
51 logging.debug("get bundled wheel %s", bundle)
52 if IS_ZIPAPP:
53 from virtualenv.util.zipapp import extract
54
55 extract(bundle, bundled_wheel_file)
56 else:
57 copy2(str(bundle), str(bundled_wheel_file))
58
59
60 def get_bundled_wheel(package, version_release):
61 return BUNDLE_FOLDER / (BUNDLE_SUPPORT.get(version_release, {}) or BUNDLE_SUPPORT[MAX]).get(package)
62
63
64 def acquire_from_dir(packages, for_py_version, to_folder, extra_search_dir):
65 if not packages:
66 return
67 for search_dir in extra_search_dir:
68 wheels = _get_wheels(search_dir, packages)
69 for pkg, ver_wheels in wheels.items():
70 stop = False
71 for _, filename in ver_wheels:
72 dest = to_folder / filename.name
73 if not dest.exists():
74 if wheel_support_py(filename, for_py_version):
75 logging.debug("get extra search dir wheel %s", filename)
76 copy2(str(filename), str(dest))
77 stop = True
78 else:
79 stop = True
80 if stop and packages[pkg] is not None:
81 del packages[pkg]
82 break
83
84
85 def wheel_support_py(filename, py_version):
86 name = "{}.dist-info/METADATA".format("-".join(filename.stem.split("-")[0:2]))
87 with ZipFile(ensure_text(str(filename)), "r") as zip_file:
88 metadata = zip_file.read(name).decode("utf-8")
89 marker = "Requires-Python:"
90 requires = next((i[len(marker) :] for i in metadata.splitlines() if i.startswith(marker)), None)
91 if requires is None: # if it does not specify a python requires the assumption is compatible
92 return True
93 py_version_int = tuple(int(i) for i in py_version.split("."))
94 for require in (i.strip() for i in requires.split(",")):
95 # https://www.python.org/dev/peps/pep-0345/#version-specifiers
96 for operator, check in [
97 ("!=", lambda v: py_version_int != v),
98 ("==", lambda v: py_version_int == v),
99 ("<=", lambda v: py_version_int <= v),
100 (">=", lambda v: py_version_int >= v),
101 ("<", lambda v: py_version_int < v),
102 (">", lambda v: py_version_int > v),
103 ]:
104 if require.startswith(operator):
105 ver_str = require[len(operator) :].strip()
106 version = tuple((int(i) if i != "*" else None) for i in ver_str.split("."))[0:2]
107 if not check(version):
108 return False
109 break
110 return True
111
112
113 def _get_wheels(from_folder, packages):
114 wheels = defaultdict(list)
115 for filename in from_folder.iterdir():
116 if filename.suffix == ".whl":
117 data = filename.stem.split("-")
118 if len(data) >= 2:
119 pkg, version = data[0:2]
120 if pkg in packages:
121 pkg_version = packages[pkg]
122 if pkg_version is None or pkg_version == version:
123 wheels[pkg].append((version, filename))
124 for versions in wheels.values():
125 versions.sort(
126 key=lambda a: tuple(int(i) if i.isdigit() else i for i in a[0].split(".")), reverse=True,
127 )
128 return wheels
129
130
131 def download_wheel(packages, for_py_version, to_folder, app_data):
132 to_download = list(p if v is None else "{}={}".format(p, v) for p, v in packages.items())
133 logging.debug("download wheels %s", to_download)
134 cmd = [
135 sys.executable,
136 "-m",
137 "pip",
138 "download",
139 "--disable-pip-version-check",
140 "--only-binary=:all:",
141 "--no-deps",
142 "--python-version",
143 for_py_version,
144 "-d",
145 str(to_folder),
146 ]
147 cmd.extend(to_download)
148 # pip has no interface in python - must be a new sub-process
149
150 with pip_wheel_env_run("{}{}".format(*sys.version_info[0:2]), app_data) as env:
151 process = Popen(cmd, env=env, stdout=subprocess.PIPE)
152 process.communicate()
153 if process.returncode != 0:
154 raise RuntimeError("failed to download wheels")
155
156
157 @contextmanager
158 def pip_wheel_env_run(version, app_data):
159 env = os.environ.copy()
160 env.update(
161 {
162 ensure_str(k): str(v) # python 2 requires these to be string only (non-unicode)
163 for k, v in {"PIP_USE_WHEEL": "1", "PIP_USER": "0", "PIP_NO_INPUT": "1"}.items()
164 }
165 )
166 with ensure_file_on_disk(get_bundled_wheel("pip", version), app_data) as pip_wheel_path:
167 # put the bundled wheel onto the path, and use it to do the bootstrap operation
168 env[str("PYTHONPATH")] = str(pip_wheel_path)
169 yield env
170
[end of src/virtualenv/seed/embed/wheels/acquire.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/virtualenv/seed/embed/wheels/acquire.py b/src/virtualenv/seed/embed/wheels/acquire.py
--- a/src/virtualenv/seed/embed/wheels/acquire.py
+++ b/src/virtualenv/seed/embed/wheels/acquire.py
@@ -129,7 +129,7 @@
def download_wheel(packages, for_py_version, to_folder, app_data):
- to_download = list(p if v is None else "{}={}".format(p, v) for p, v in packages.items())
+ to_download = list(p if v is None else "{}=={}".format(p, v) for p, v in packages.items())
logging.debug("download wheels %s", to_download)
cmd = [
sys.executable,
diff --git a/src/virtualenv/util/path/_pathlib/via_os_path.py b/src/virtualenv/util/path/_pathlib/via_os_path.py
--- a/src/virtualenv/util/path/_pathlib/via_os_path.py
+++ b/src/virtualenv/util/path/_pathlib/via_os_path.py
@@ -111,9 +111,9 @@
def parents(self):
result = []
parts = self.parts
- for i in range(len(parts)):
+ for i in range(len(parts) - 1):
result.append(Path(os.sep.join(parts[0 : i + 1])))
- return result
+ return result[::-1]
def unlink(self):
os.remove(self._path)
|
{"golden_diff": "diff --git a/src/virtualenv/seed/embed/wheels/acquire.py b/src/virtualenv/seed/embed/wheels/acquire.py\n--- a/src/virtualenv/seed/embed/wheels/acquire.py\n+++ b/src/virtualenv/seed/embed/wheels/acquire.py\n@@ -129,7 +129,7 @@\n \n \n def download_wheel(packages, for_py_version, to_folder, app_data):\n- to_download = list(p if v is None else \"{}={}\".format(p, v) for p, v in packages.items())\n+ to_download = list(p if v is None else \"{}=={}\".format(p, v) for p, v in packages.items())\n logging.debug(\"download wheels %s\", to_download)\n cmd = [\n sys.executable,\ndiff --git a/src/virtualenv/util/path/_pathlib/via_os_path.py b/src/virtualenv/util/path/_pathlib/via_os_path.py\n--- a/src/virtualenv/util/path/_pathlib/via_os_path.py\n+++ b/src/virtualenv/util/path/_pathlib/via_os_path.py\n@@ -111,9 +111,9 @@\n def parents(self):\n result = []\n parts = self.parts\n- for i in range(len(parts)):\n+ for i in range(len(parts) - 1):\n result.append(Path(os.sep.join(parts[0 : i + 1])))\n- return result\n+ return result[::-1]\n \n def unlink(self):\n os.remove(self._path)\n", "issue": "pinning pip fails via app data seeder\n```\r\nVIRTUALENV_PIP=19.2.3 virtualenv --download --seeder app-data .venv\r\nERROR: Invalid requirement: 'pip=19.2.3'\r\n```\r\n\r\nfrom https://github.com/pglass/tox-pip-version/issues/17#issuecomment-616246861\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nimport platform\nfrom contextlib import contextmanager\n\nfrom virtualenv.util.six import ensure_str, ensure_text\n\nIS_PYPY = platform.python_implementation() == \"PyPy\"\n\n\nclass Path(object):\n def __init__(self, path):\n if isinstance(path, Path):\n _path = path._path\n else:\n _path = ensure_text(path)\n if IS_PYPY:\n _path = _path.encode(\"utf-8\")\n self._path = _path\n\n def __repr__(self):\n return ensure_str(\"Path({})\".format(ensure_text(self._path)))\n\n def __unicode__(self):\n return ensure_text(self._path)\n\n def __str__(self):\n return ensure_str(self._path)\n\n def __div__(self, other):\n if isinstance(other, Path):\n right = other._path\n else:\n right = ensure_text(other)\n if IS_PYPY:\n right = right.encode(\"utf-8\")\n return Path(os.path.join(self._path, right))\n\n def __truediv__(self, other):\n return self.__div__(other)\n\n def __eq__(self, other):\n return self._path == (other._path if isinstance(other, Path) else None)\n\n def __ne__(self, other):\n return not (self == other)\n\n def __hash__(self):\n return hash(self._path)\n\n def exists(self):\n return os.path.exists(self._path)\n\n @property\n def parent(self):\n return Path(os.path.abspath(os.path.join(self._path, os.path.pardir)))\n\n def resolve(self):\n return Path(os.path.realpath(self._path))\n\n @property\n def name(self):\n return os.path.basename(self._path)\n\n @property\n def parts(self):\n return self._path.split(os.sep)\n\n def is_file(self):\n return os.path.isfile(self._path)\n\n def is_dir(self):\n return os.path.isdir(self._path)\n\n def mkdir(self, parents=True, exist_ok=True):\n if not self.exists() and exist_ok:\n os.makedirs(self._path)\n\n def read_text(self, encoding=\"utf-8\"):\n return self.read_bytes().decode(encoding)\n\n def read_bytes(self):\n with open(self._path, \"rb\") as file_handler:\n return file_handler.read()\n\n def write_text(self, text, encoding=\"utf-8\"):\n with open(self._path, \"wb\") as file_handler:\n file_handler.write(text.encode(encoding))\n\n def iterdir(self):\n for p in os.listdir(self._path):\n yield 
Path(os.path.join(self._path, p))\n\n @property\n def suffix(self):\n _, ext = os.path.splitext(self.name)\n return ext\n\n @property\n def stem(self):\n base, _ = os.path.splitext(self.name)\n return base\n\n @contextmanager\n def open(self, mode=\"r\"):\n with open(self._path, mode) as file_handler:\n yield file_handler\n\n @property\n def parents(self):\n result = []\n parts = self.parts\n for i in range(len(parts)):\n result.append(Path(os.sep.join(parts[0 : i + 1])))\n return result\n\n def unlink(self):\n os.remove(self._path)\n\n def with_name(self, name):\n return self.parent / name\n\n def is_symlink(self):\n return os.path.islink(self._path)\n\n def relative_to(self, other):\n if not self._path.startswith(other._path):\n raise ValueError(\"{} does not start with {}\".format(self._path, other._path))\n return Path(os.sep.join(self.parts[len(other.parts) :]))\n\n def stat(self):\n return os.stat(self._path)\n\n def chmod(self, mode):\n os.chmod(self._path, mode)\n\n\n__all__ = (\"Path\",)\n", "path": "src/virtualenv/util/path/_pathlib/via_os_path.py"}, {"content": "\"\"\"Bootstrap\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport logging\nimport os\nimport sys\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nfrom copy import copy\nfrom shutil import copy2\nfrom zipfile import ZipFile\n\nfrom virtualenv.info import IS_ZIPAPP\nfrom virtualenv.util.path import Path\nfrom virtualenv.util.six import ensure_str, ensure_text\nfrom virtualenv.util.subprocess import Popen, subprocess\nfrom virtualenv.util.zipapp import ensure_file_on_disk\n\nfrom . import BUNDLE_SUPPORT, MAX\n\nBUNDLE_FOLDER = Path(os.path.abspath(__file__)).parent\n\n\ndef get_wheels(for_py_version, wheel_cache_dir, extra_search_dir, download, packages, app_data):\n # not all wheels are compatible with all python versions, so we need to py version qualify it\n processed = copy(packages)\n # 1. acquire from bundle\n acquire_from_bundle(processed, for_py_version, wheel_cache_dir)\n # 2. acquire from extra search dir\n acquire_from_dir(processed, for_py_version, wheel_cache_dir, extra_search_dir)\n # 3. 
download from the internet\n if download and processed:\n download_wheel(processed, for_py_version, wheel_cache_dir, app_data)\n\n # in the end just get the wheels\n wheels = _get_wheels(wheel_cache_dir, packages)\n return {p: next(iter(ver_to_files))[1] for p, ver_to_files in wheels.items()}\n\n\ndef acquire_from_bundle(packages, for_py_version, to_folder):\n for pkg, version in list(packages.items()):\n bundle = get_bundled_wheel(pkg, for_py_version)\n if bundle is not None:\n pkg_version = bundle.stem.split(\"-\")[1]\n exact_version_match = version == pkg_version\n if exact_version_match:\n del packages[pkg]\n if version is None or exact_version_match:\n bundled_wheel_file = to_folder / bundle.name\n if not bundled_wheel_file.exists():\n logging.debug(\"get bundled wheel %s\", bundle)\n if IS_ZIPAPP:\n from virtualenv.util.zipapp import extract\n\n extract(bundle, bundled_wheel_file)\n else:\n copy2(str(bundle), str(bundled_wheel_file))\n\n\ndef get_bundled_wheel(package, version_release):\n return BUNDLE_FOLDER / (BUNDLE_SUPPORT.get(version_release, {}) or BUNDLE_SUPPORT[MAX]).get(package)\n\n\ndef acquire_from_dir(packages, for_py_version, to_folder, extra_search_dir):\n if not packages:\n return\n for search_dir in extra_search_dir:\n wheels = _get_wheels(search_dir, packages)\n for pkg, ver_wheels in wheels.items():\n stop = False\n for _, filename in ver_wheels:\n dest = to_folder / filename.name\n if not dest.exists():\n if wheel_support_py(filename, for_py_version):\n logging.debug(\"get extra search dir wheel %s\", filename)\n copy2(str(filename), str(dest))\n stop = True\n else:\n stop = True\n if stop and packages[pkg] is not None:\n del packages[pkg]\n break\n\n\ndef wheel_support_py(filename, py_version):\n name = \"{}.dist-info/METADATA\".format(\"-\".join(filename.stem.split(\"-\")[0:2]))\n with ZipFile(ensure_text(str(filename)), \"r\") as zip_file:\n metadata = zip_file.read(name).decode(\"utf-8\")\n marker = \"Requires-Python:\"\n requires = next((i[len(marker) :] for i in metadata.splitlines() if i.startswith(marker)), None)\n if requires is None: # if it does not specify a python requires the assumption is compatible\n return True\n py_version_int = tuple(int(i) for i in py_version.split(\".\"))\n for require in (i.strip() for i in requires.split(\",\")):\n # https://www.python.org/dev/peps/pep-0345/#version-specifiers\n for operator, check in [\n (\"!=\", lambda v: py_version_int != v),\n (\"==\", lambda v: py_version_int == v),\n (\"<=\", lambda v: py_version_int <= v),\n (\">=\", lambda v: py_version_int >= v),\n (\"<\", lambda v: py_version_int < v),\n (\">\", lambda v: py_version_int > v),\n ]:\n if require.startswith(operator):\n ver_str = require[len(operator) :].strip()\n version = tuple((int(i) if i != \"*\" else None) for i in ver_str.split(\".\"))[0:2]\n if not check(version):\n return False\n break\n return True\n\n\ndef _get_wheels(from_folder, packages):\n wheels = defaultdict(list)\n for filename in from_folder.iterdir():\n if filename.suffix == \".whl\":\n data = filename.stem.split(\"-\")\n if len(data) >= 2:\n pkg, version = data[0:2]\n if pkg in packages:\n pkg_version = packages[pkg]\n if pkg_version is None or pkg_version == version:\n wheels[pkg].append((version, filename))\n for versions in wheels.values():\n versions.sort(\n key=lambda a: tuple(int(i) if i.isdigit() else i for i in a[0].split(\".\")), reverse=True,\n )\n return wheels\n\n\ndef download_wheel(packages, for_py_version, to_folder, app_data):\n to_download = list(p if v is None 
else \"{}={}\".format(p, v) for p, v in packages.items())\n logging.debug(\"download wheels %s\", to_download)\n cmd = [\n sys.executable,\n \"-m\",\n \"pip\",\n \"download\",\n \"--disable-pip-version-check\",\n \"--only-binary=:all:\",\n \"--no-deps\",\n \"--python-version\",\n for_py_version,\n \"-d\",\n str(to_folder),\n ]\n cmd.extend(to_download)\n # pip has no interface in python - must be a new sub-process\n\n with pip_wheel_env_run(\"{}{}\".format(*sys.version_info[0:2]), app_data) as env:\n process = Popen(cmd, env=env, stdout=subprocess.PIPE)\n process.communicate()\n if process.returncode != 0:\n raise RuntimeError(\"failed to download wheels\")\n\n\n@contextmanager\ndef pip_wheel_env_run(version, app_data):\n env = os.environ.copy()\n env.update(\n {\n ensure_str(k): str(v) # python 2 requires these to be string only (non-unicode)\n for k, v in {\"PIP_USE_WHEEL\": \"1\", \"PIP_USER\": \"0\", \"PIP_NO_INPUT\": \"1\"}.items()\n }\n )\n with ensure_file_on_disk(get_bundled_wheel(\"pip\", version), app_data) as pip_wheel_path:\n # put the bundled wheel onto the path, and use it to do the bootstrap operation\n env[str(\"PYTHONPATH\")] = str(pip_wheel_path)\n yield env\n", "path": "src/virtualenv/seed/embed/wheels/acquire.py"}]}
| 3,765 | 328 |
gh_patches_debug_32554
|
rasdani/github-patches
|
git_diff
|
crytic__slither-162
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Redundant inheritance lines in inheritance-graph printer
Hey guys! I've been using Slither for some time now and found it quite useful, so thanks for all the effort you put into developing this tool.
While using the `inheritance-graph` printer, I found that the resulting graph contains several redundant inheritance lines that should be better left off the graph. For instance:
~~~solidity
pragma solidity ^0.4.25;
contract Grandparent { }
contract Parent is Grandparent {}
contract Child is Parent {}
~~~
Slither's printer output is:

Note that there's a line that goes from `Child` to `Grandparent` which is redundant, as `Child` is already `Parent` and `Parent` is already `Grandparent`. While this example is pretty simple, in far more complex inheritance chains these extra lines add little to no value and the graph ends up being too cluttered with lines pointing everywhere.
I'm not sure if this is a feature you added on purpose or a bug. If the former, are you considering adding an extra flag to the printer so those extra lines are not included in the final inheritance graph?
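For what it's worth, the kind of filtering such a flag could apply might look roughly like this (a hypothetical Python sketch, not an existing Slither API; it assumes `contract.inheritance` lists every ancestor):
~~~python
# Hypothetical helper: keep only "direct" bases, dropping any base that is
# already reachable through another base (so Child -> Grandparent is skipped).
def direct_bases(contract):
    reachable_through_bases = set()
    for base in contract.inheritance:   # assumed to contain all ancestors
        reachable_through_bases.update(base.inheritance)
    return [b for b in contract.inheritance if b not in reachable_through_bases]
~~~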
</issue>
<code>
[start of slither/printers/inheritance/inheritance.py]
1 """
2 Module printing the inheritance relation
3
4 The inheritance shows the relation between the contracts
5 """
6
7 from slither.printers.abstract_printer import AbstractPrinter
8 from slither.utils.colors import blue, green
9
10
11 class PrinterInheritance(AbstractPrinter):
12 ARGUMENT = 'inheritance'
13 HELP = 'Print the inheritance relations between contracts'
14
15 def _get_child_contracts(self, base):
16 # Generate function to get all child contracts of a base contract
17 for child in self.contracts:
18 if base in child.inheritance:
19 yield child
20
21 def output(self, filename):
22 """
23 Output the inheritance relation
24
25 _filename is not used
26 Args:
27 _filename(string)
28 """
29 info = 'Inheritance\n'
30
31 if not self.contracts:
32 return
33
34 info += blue('Child_Contract -> ') + green('Base_Contracts')
35 for child in self.contracts:
36 info += blue(f'\n+ {child.name}')
37 if child.inheritance:
38 info += ' -> ' + green(", ".join(map(str, child.inheritance)))
39
40 info += green('\n\nBase_Contract -> ') + blue('Child_Contracts')
41 for base in self.contracts:
42 info += green(f'\n+ {base.name}')
43 children = list(self._get_child_contracts(base))
44 if children:
45 info += ' -> ' + blue(", ".join(map(str, children)))
46 self.info(info)
47
[end of slither/printers/inheritance/inheritance.py]
[start of slither/printers/inheritance/inheritance_graph.py]
1 """
2 Module printing the inheritance graph
3
4 The inheritance graph shows the relation between the contracts
5 and their functions/modifiers/public variables.
6 The output is a dot file named filename.dot
7 """
8
9 from slither.core.declarations.contract import Contract
10 from slither.detectors.shadowing.shadowing_functions import ShadowingFunctionsDetection
11 from slither.printers.abstract_printer import AbstractPrinter
12
13 class PrinterInheritanceGraph(AbstractPrinter):
14 ARGUMENT = 'inheritance-graph'
15 HELP = 'Export the inheritance graph of each contract to a dot file'
16
17 def __init__(self, slither, logger):
18 super(PrinterInheritanceGraph, self).__init__(slither, logger)
19
20 inheritance = [x.inheritance for x in slither.contracts]
21 self.inheritance = set([item for sublist in inheritance for item in sublist])
22
23 shadow = ShadowingFunctionsDetection(slither, None)
24 ret = shadow.detect()
25 functions_shadowed = {}
26 for s in ret:
27 if s['contractShadower'] not in functions_shadowed:
28 functions_shadowed[s['contractShadower']] = []
29 functions_shadowed[s['contractShadower']] += s['functions']
30 self.functions_shadowed = functions_shadowed
31
32 def _get_pattern_func(self, func, contract):
33 # Html pattern, each line is a row in a table
34 func_name = func.full_name
35 pattern = '<TR><TD align="left"> %s</TD></TR>'
36 pattern_shadow = '<TR><TD align="left"><font color="#FFA500"> %s</font></TD></TR>'
37 if contract.name in self.functions_shadowed:
38 if func_name in self.functions_shadowed[contract.name]:
39 return pattern_shadow % func_name
40 return pattern % func_name
41
42 def _get_pattern_var(self, var, contract):
43 # Html pattern, each line is a row in a table
44 var_name = var.name
45 pattern = '<TR><TD align="left"> %s</TD></TR>'
46 pattern_contract = '<TR><TD align="left"> %s<font color="blue" POINT-SIZE="10"> (%s)</font></TD></TR>'
47 # pattern_arrow = '<TR><TD align="left" PORT="%s"><font color="blue"> %s</font></TD></TR>'
48 if isinstance(var.type, Contract):
49 return pattern_contract % (var_name, str(var.type))
50 # return pattern_arrow%(self._get_port_id(var, contract), var_name)
51 return pattern % var_name
52
53 def _get_port_id(self, var, contract):
54 return "%s%s" % (var.name, contract.name)
55
56 def _summary(self, contract):
57 """
58 Build summary using HTML
59 """
60 ret = ''
61 # Add arrows
62 for i in contract.inheritance:
63 ret += '%s -> %s;\n' % (contract.name, i)
64
65 # Functions
66 visibilities = ['public', 'external']
67 public_functions = [self._get_pattern_func(f, contract) for f in contract.functions if
68 not f.is_constructor and f.contract == contract and f.visibility in visibilities]
69 public_functions = ''.join(public_functions)
70 private_functions = [self._get_pattern_func(f, contract) for f in contract.functions if
71 not f.is_constructor and f.contract == contract and f.visibility not in visibilities]
72 private_functions = ''.join(private_functions)
73 # Modifiers
74 modifiers = [self._get_pattern_func(m, contract) for m in contract.modifiers if m.contract == contract]
75 modifiers = ''.join(modifiers)
76 # Public variables
77 public_variables = [self._get_pattern_var(v, contract) for v in contract.variables if
78 v.visibility in visibilities]
79 public_variables = ''.join(public_variables)
80
81 private_variables = [self._get_pattern_var(v, contract) for v in contract.variables if
82 not v.visibility in visibilities]
83 private_variables = ''.join(private_variables)
84
85 # Build the node label
86 ret += '%s[shape="box"' % contract.name
87 ret += 'label=< <TABLE border="0">'
88 ret += '<TR><TD align="center"><B>%s</B></TD></TR>' % contract.name
89 if public_functions:
90 ret += '<TR><TD align="left"><I>Public Functions:</I></TD></TR>'
91 ret += '%s' % public_functions
92 if private_functions:
93 ret += '<TR><TD align="left"><I>Private Functions:</I></TD></TR>'
94 ret += '%s' % private_functions
95 if modifiers:
96 ret += '<TR><TD align="left"><I>Modifiers:</I></TD></TR>'
97 ret += '%s' % modifiers
98 if public_variables:
99 ret += '<TR><TD align="left"><I>Public Variables:</I></TD></TR>'
100 ret += '%s' % public_variables
101 if private_variables:
102 ret += '<TR><TD align="left"><I>Private Variables:</I></TD></TR>'
103 ret += '%s' % private_variables
104 ret += '</TABLE> >];\n'
105
106 return ret
107
108 def output(self, filename):
109 """
110 Output the graph in filename
111 Args:
112 filename(string)
113 """
114 if filename == '':
115 filename = 'export'
116 if not filename.endswith('.dot'):
117 filename += ".dot"
118 info = 'Inheritance Graph: ' + filename
119 self.info(info)
120 with open(filename, 'w', encoding='utf8') as f:
121 f.write('digraph{\n')
122 for c in self.contracts:
123 f.write(self._summary(c))
124 f.write('}')
125
[end of slither/printers/inheritance/inheritance_graph.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/slither/printers/inheritance/inheritance.py b/slither/printers/inheritance/inheritance.py
--- a/slither/printers/inheritance/inheritance.py
+++ b/slither/printers/inheritance/inheritance.py
@@ -31,16 +31,26 @@
if not self.contracts:
return
- info += blue('Child_Contract -> ') + green('Base_Contracts')
+ info += blue('Child_Contract -> ') + green('Immediate_Base_Contracts')
+ info += green(' [Not_Immediate_Base_Contracts]')
for child in self.contracts:
info += blue(f'\n+ {child.name}')
if child.inheritance:
- info += ' -> ' + green(", ".join(map(str, child.inheritance)))
-
- info += green('\n\nBase_Contract -> ') + blue('Child_Contracts')
+ immediate = child.immediate_inheritance
+ not_immediate = [i for i in child.inheritance if i not in immediate]
+ info += ' -> ' + green(", ".join(map(str, immediate)))
+ if not_immediate:
+ info += ", ["+ green(", ".join(map(str, not_immediate))) + "]"
+
+ info += green('\n\nBase_Contract -> ') + blue('Immediate_Child_Contracts')
+ info += blue(' [Not_Immediate_Child_Contracts]')
for base in self.contracts:
info += green(f'\n+ {base.name}')
children = list(self._get_child_contracts(base))
if children:
- info += ' -> ' + blue(", ".join(map(str, children)))
+ immediate = [child for child in children if base in child.immediate_inheritance]
+ not_immediate = [child for child in children if not child in immediate]
+ info += ' -> ' + blue(", ".join(map(str, immediate)))
+ if not_immediate:
+ info += ', [' + blue(", ".join(map(str, not_immediate))) + ']'
self.info(info)
diff --git a/slither/printers/inheritance/inheritance_graph.py b/slither/printers/inheritance/inheritance_graph.py
--- a/slither/printers/inheritance/inheritance_graph.py
+++ b/slither/printers/inheritance/inheritance_graph.py
@@ -59,7 +59,7 @@
"""
ret = ''
# Add arrows
- for i in contract.inheritance:
+ for i in contract.immediate_inheritance:
ret += '%s -> %s;\n' % (contract.name, i)
# Functions
|
{"golden_diff": "diff --git a/slither/printers/inheritance/inheritance.py b/slither/printers/inheritance/inheritance.py\n--- a/slither/printers/inheritance/inheritance.py\n+++ b/slither/printers/inheritance/inheritance.py\n@@ -31,16 +31,26 @@\n if not self.contracts:\n return\n \n- info += blue('Child_Contract -> ') + green('Base_Contracts')\n+ info += blue('Child_Contract -> ') + green('Immediate_Base_Contracts')\n+ info += green(' [Not_Immediate_Base_Contracts]')\n for child in self.contracts:\n info += blue(f'\\n+ {child.name}')\n if child.inheritance:\n- info += ' -> ' + green(\", \".join(map(str, child.inheritance)))\n-\n- info += green('\\n\\nBase_Contract -> ') + blue('Child_Contracts')\n+ immediate = child.immediate_inheritance\n+ not_immediate = [i for i in child.inheritance if i not in immediate]\n+ info += ' -> ' + green(\", \".join(map(str, immediate)))\n+ if not_immediate:\n+ info += \", [\"+ green(\", \".join(map(str, not_immediate))) + \"]\"\n+\n+ info += green('\\n\\nBase_Contract -> ') + blue('Immediate_Child_Contracts')\n+ info += blue(' [Not_Immediate_Child_Contracts]')\n for base in self.contracts:\n info += green(f'\\n+ {base.name}')\n children = list(self._get_child_contracts(base))\n if children:\n- info += ' -> ' + blue(\", \".join(map(str, children)))\n+ immediate = [child for child in children if base in child.immediate_inheritance]\n+ not_immediate = [child for child in children if not child in immediate]\n+ info += ' -> ' + blue(\", \".join(map(str, immediate)))\n+ if not_immediate:\n+ info += ', [' + blue(\", \".join(map(str, not_immediate))) + ']'\n self.info(info)\ndiff --git a/slither/printers/inheritance/inheritance_graph.py b/slither/printers/inheritance/inheritance_graph.py\n--- a/slither/printers/inheritance/inheritance_graph.py\n+++ b/slither/printers/inheritance/inheritance_graph.py\n@@ -59,7 +59,7 @@\n \"\"\"\n ret = ''\n # Add arrows\n- for i in contract.inheritance:\n+ for i in contract.immediate_inheritance:\n ret += '%s -> %s;\\n' % (contract.name, i)\n \n # Functions\n", "issue": "Redundant inheritance lines in inheritance-graph printer\nHey guys! I've been using Slither for some time now and found it quite useful, so thanks for all the effort you put into developing this tool.\r\n\r\nWhile using the `inheritance-graph` printer, I found that the resulting graph contains several redundant inheritance lines that should be better left off the graph. For instance: \r\n~~~solidity\r\npragma solidity ^0.4.25;\r\ncontract Grandparent { }\r\ncontract Parent is Grandparent {}\r\ncontract Child is Parent {}\r\n~~~\r\n\r\nSlither's printer output is:\r\n\r\n\r\nNote that there's a line that goes from `Child` to `Grandparent` which is redundant, as `Child` is already `Parent` and `Parent` is already `Grandparent`. While this example is pretty simple, in far more complex inheritance chains these extra lines add little to no value and the graph ends up being too cluttered with lines pointing everywhere.\r\n\r\nI'm not sure if this is a feature you added on purpose or a bug. 
If the former, are you considering adding an extra flag to the printer so those extra lines are not included in the final inheritance graph?.\n", "before_files": [{"content": "\"\"\"\n Module printing the inheritance relation\n\n The inheritance shows the relation between the contracts\n\"\"\"\n\nfrom slither.printers.abstract_printer import AbstractPrinter\nfrom slither.utils.colors import blue, green\n\n\nclass PrinterInheritance(AbstractPrinter):\n ARGUMENT = 'inheritance'\n HELP = 'Print the inheritance relations between contracts'\n\n def _get_child_contracts(self, base):\n # Generate function to get all child contracts of a base contract\n for child in self.contracts:\n if base in child.inheritance:\n yield child\n\n def output(self, filename):\n \"\"\"\n Output the inheritance relation\n\n _filename is not used\n Args:\n _filename(string)\n \"\"\"\n info = 'Inheritance\\n'\n\n if not self.contracts:\n return\n\n info += blue('Child_Contract -> ') + green('Base_Contracts')\n for child in self.contracts:\n info += blue(f'\\n+ {child.name}')\n if child.inheritance:\n info += ' -> ' + green(\", \".join(map(str, child.inheritance)))\n\n info += green('\\n\\nBase_Contract -> ') + blue('Child_Contracts')\n for base in self.contracts:\n info += green(f'\\n+ {base.name}')\n children = list(self._get_child_contracts(base))\n if children:\n info += ' -> ' + blue(\", \".join(map(str, children)))\n self.info(info)\n", "path": "slither/printers/inheritance/inheritance.py"}, {"content": "\"\"\"\n Module printing the inheritance graph\n\n The inheritance graph shows the relation between the contracts\n and their functions/modifiers/public variables.\n The output is a dot file named filename.dot\n\"\"\"\n\nfrom slither.core.declarations.contract import Contract\nfrom slither.detectors.shadowing.shadowing_functions import ShadowingFunctionsDetection\nfrom slither.printers.abstract_printer import AbstractPrinter\n\nclass PrinterInheritanceGraph(AbstractPrinter):\n ARGUMENT = 'inheritance-graph'\n HELP = 'Export the inheritance graph of each contract to a dot file'\n\n def __init__(self, slither, logger):\n super(PrinterInheritanceGraph, self).__init__(slither, logger)\n\n inheritance = [x.inheritance for x in slither.contracts]\n self.inheritance = set([item for sublist in inheritance for item in sublist])\n\n shadow = ShadowingFunctionsDetection(slither, None)\n ret = shadow.detect()\n functions_shadowed = {}\n for s in ret:\n if s['contractShadower'] not in functions_shadowed:\n functions_shadowed[s['contractShadower']] = []\n functions_shadowed[s['contractShadower']] += s['functions']\n self.functions_shadowed = functions_shadowed\n\n def _get_pattern_func(self, func, contract):\n # Html pattern, each line is a row in a table\n func_name = func.full_name\n pattern = '<TR><TD align=\"left\"> %s</TD></TR>'\n pattern_shadow = '<TR><TD align=\"left\"><font color=\"#FFA500\"> %s</font></TD></TR>'\n if contract.name in self.functions_shadowed:\n if func_name in self.functions_shadowed[contract.name]:\n return pattern_shadow % func_name\n return pattern % func_name\n\n def _get_pattern_var(self, var, contract):\n # Html pattern, each line is a row in a table\n var_name = var.name\n pattern = '<TR><TD align=\"left\"> %s</TD></TR>'\n pattern_contract = '<TR><TD align=\"left\"> %s<font color=\"blue\" POINT-SIZE=\"10\"> (%s)</font></TD></TR>'\n # pattern_arrow = '<TR><TD align=\"left\" PORT=\"%s\"><font color=\"blue\"> %s</font></TD></TR>'\n if isinstance(var.type, Contract):\n return pattern_contract % 
(var_name, str(var.type))\n # return pattern_arrow%(self._get_port_id(var, contract), var_name)\n return pattern % var_name\n\n def _get_port_id(self, var, contract):\n return \"%s%s\" % (var.name, contract.name)\n\n def _summary(self, contract):\n \"\"\"\n Build summary using HTML\n \"\"\"\n ret = ''\n # Add arrows\n for i in contract.inheritance:\n ret += '%s -> %s;\\n' % (contract.name, i)\n\n # Functions\n visibilities = ['public', 'external']\n public_functions = [self._get_pattern_func(f, contract) for f in contract.functions if\n not f.is_constructor and f.contract == contract and f.visibility in visibilities]\n public_functions = ''.join(public_functions)\n private_functions = [self._get_pattern_func(f, contract) for f in contract.functions if\n not f.is_constructor and f.contract == contract and f.visibility not in visibilities]\n private_functions = ''.join(private_functions)\n # Modifiers\n modifiers = [self._get_pattern_func(m, contract) for m in contract.modifiers if m.contract == contract]\n modifiers = ''.join(modifiers)\n # Public variables\n public_variables = [self._get_pattern_var(v, contract) for v in contract.variables if\n v.visibility in visibilities]\n public_variables = ''.join(public_variables)\n\n private_variables = [self._get_pattern_var(v, contract) for v in contract.variables if\n not v.visibility in visibilities]\n private_variables = ''.join(private_variables)\n\n # Build the node label\n ret += '%s[shape=\"box\"' % contract.name\n ret += 'label=< <TABLE border=\"0\">'\n ret += '<TR><TD align=\"center\"><B>%s</B></TD></TR>' % contract.name\n if public_functions:\n ret += '<TR><TD align=\"left\"><I>Public Functions:</I></TD></TR>'\n ret += '%s' % public_functions\n if private_functions:\n ret += '<TR><TD align=\"left\"><I>Private Functions:</I></TD></TR>'\n ret += '%s' % private_functions\n if modifiers:\n ret += '<TR><TD align=\"left\"><I>Modifiers:</I></TD></TR>'\n ret += '%s' % modifiers\n if public_variables:\n ret += '<TR><TD align=\"left\"><I>Public Variables:</I></TD></TR>'\n ret += '%s' % public_variables\n if private_variables:\n ret += '<TR><TD align=\"left\"><I>Private Variables:</I></TD></TR>'\n ret += '%s' % private_variables\n ret += '</TABLE> >];\\n'\n\n return ret\n\n def output(self, filename):\n \"\"\"\n Output the graph in filename\n Args:\n filename(string)\n \"\"\"\n if filename == '':\n filename = 'export'\n if not filename.endswith('.dot'):\n filename += \".dot\"\n info = 'Inheritance Graph: ' + filename\n self.info(info)\n with open(filename, 'w', encoding='utf8') as f:\n f.write('digraph{\\n')\n for c in self.contracts:\n f.write(self._summary(c))\n f.write('}')\n", "path": "slither/printers/inheritance/inheritance_graph.py"}]}
| 2,774 | 576 |
gh_patches_debug_4290
|
rasdani/github-patches
|
git_diff
|
ansible__ansible-modules-extras-1530
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
rabbitmq_parameter fails when passing dynamically generated json
##### Issue Type:
“Bug Report”
##### Ansible Version:
ansible 1.8.2
configured module search path = None
Tested also on ansible 1.9
ansible 1.9 (devel 8f06ba2bc1) last updated 2015/02/05 11:16:37 (GMT +200)
lib/ansible/modules/core: (detached HEAD 600fc15b42) last updated 2015/02/05 11:16:50 (GMT +200)
lib/ansible/modules/extras: (detached HEAD 77485f6c6a) last updated 2015/02/05 11:17:01 (GMT +200)
v2/ansible/modules/core: (detached HEAD 600fc15b42) last updated 2015/02/05 11:17:13 (GMT +200)
v2/ansible/modules/extras: (detached HEAD 77485f6c6a) last updated 2015/02/05 11:17:36 (GMT +200)
configured module search path = None
##### Environment:
OS X 10.9.5 > Ubuntu 14.04
##### Summary:
Passing non-hardcoded json (through templating) to rabbitmq_parameter's "value" argument raises an exception. It works when hardcoding the json though.
##### Steps To Reproduce:
Here's an example playbook with one successful task (the first one), and two failing tasks (the next two) which both raise the same exception (see below). All of them are supposed to do the same thing.
```
- hosts: my-host
tasks:
- set_fact:
upstream_list:
- upstream: "test"
- upstream: "test2"
json_upstreams: '[{"upstream":"test1"},{"upstream":"test2"}]'
- rabbitmq_parameter:
component: federation-upstream-set
name: test-upstream-set
value: '[{"upstream":"test1"},{"upstream":"test2"}]'
sudo: yes
- rabbitmq_parameter:
component: federation-upstream-set
name: test-upstream-set
value: '{{ json_upstreams }}'
sudo: yes
- rabbitmq_parameter:
component: federation-upstream-set
name: test-upstream-set
value: '{{ upstream_list|to_json }}'
sudo: yes
```
##### Expected Results:
Set a federation-upstream-set parameter to the default vhost.
##### Actual Results:
The two failing tasks raise the same exception. The output from ansible with -vvvv is as follows:
```
<my-host> ESTABLISH CONNECTION FOR USER: xxxx
<my-host> REMOTE_MODULE rabbitmq_parameter name=test-upstream-set component=federation-upstream-set vhost=/
<my-host> EXEC ssh -C -vvv -o ForwardAgent=yes -o ControlMaster=auto -o ControlPersist=30m -o ControlPath="xxxx/.ansible/cp/ansible-ssh-%h-%p-%r" -o Port=22 -o KbdInteractiveAuthentication=no -o PreferredAuthentications=gssapi-with-mic,gssapi-k
eyex,hostbased,publickey -o PasswordAuthentication=no -o User=xxxx -o ConnectTimeout=10 my- host /bin/sh -c 'sudo -k && sudo -H -S -p "[sudo via ansible, key=xxxxxx] password: " -u root /bin/sh -c '"'"'echo SUDO-SUCCESS-xxxxxx; LANG=en_US.UTF-8 LC_CTYPE=en_US.UTF-8 /usr/bin/python'"'"''
failed: [my-host] => {"failed": true, "parsed": false}
SUDO-SUCCESS-xxxxxx
OpenSSH_6.2p2, OSSLShim 0.9.8r 8 Dec 2011
debug1: Reading configuration data /xxxxx/.ssh/config
debug1: Reading configuration data /etc/ssh_config
debug1: /etc/ssh_config line 20: Applying options for *
debug1: /etc/ssh_config line 102: Applying options for *
debug1: auto-mux: Trying existing master
debug2: fd 3 setting O_NONBLOCK
debug2: mux_client_hello_exchange: master version 4
debug3: mux_client_forwards: request forwardings: 0 local, 0 remote
debug3: mux_client_request_session: entering
debug3: mux_client_request_alive: entering
debug3: mux_client_request_alive: done pid = 58450
debug3: mux_client_request_session: session request sent
debug1: mux_client_request_session: master session id: 2
[sudo via ansible, key=xxxxx] password: Traceback (most recent call last):
File "<stdin>", line 1717, in <module>
File "<stdin>", line 142, in main
File "<stdin>", line 104, in set
File "<stdin>", line 88, in _exec
File "<stdin>", line 1566, in run_command
File "/usr/lib/python2.7/posixpath.py", line 261, in expanduser
if not path.startswith('~'):
AttributeError: 'list' object has no attribute 'startswith'
debug3: mux_client_read_packet: read header failed: Broken pipe
debug2: Received exit status from master 1
```
##### Tentative solution:
From what I've seen while troubleshooting the issue, the problem comes from the rabbitmq_parameter module being passed a list to its "value" argument. It is then passed to AnsibleModule::run_command, which assumes it is a string and passes it through os.path.expanduser.
I've managed to correct this behavior by setting the value to `value if type(value) == str else json.dumps(value)`.
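Roughly, that workaround looks like this inside the module's `main()` (a sketch only, untested; it assumes `json` is importable in the module's namespace through the snippet import at the bottom of the file):

```python
value = module.params['value']
if not isinstance(value, str):   # a templated list/dict rather than a plain string
    value = json.dumps(value)    # hand rabbitmqctl a JSON string instead
```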
I can submit a more complete patch or pull request if this is indeed a bug and not an issue in my playbook.
</issue>
<code>
[start of messaging/rabbitmq_parameter.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # (c) 2013, Chatham Financial <[email protected]>
5 #
6 # This file is part of Ansible
7 #
8 # Ansible is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # Ansible is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
20
21 DOCUMENTATION = '''
22 ---
23 module: rabbitmq_parameter
24 short_description: Adds or removes parameters to RabbitMQ
25 description:
26 - Manage dynamic, cluster-wide parameters for RabbitMQ
27 version_added: "1.1"
28 author: '"Chris Hoffman (@chrishoffman)"'
29 options:
30 component:
31 description:
32 - Name of the component of which the parameter is being set
33 required: true
34 default: null
35 name:
36 description:
37 - Name of the parameter being set
38 required: true
39 default: null
40 value:
41 description:
42 - Value of the parameter, as a JSON term
43 required: false
44 default: null
45 vhost:
46 description:
47 - vhost to apply access privileges.
48 required: false
49 default: /
50 node:
51 description:
52 - erlang node name of the rabbit we wish to configure
53 required: false
54 default: rabbit
55 version_added: "1.2"
56 state:
57 description:
58 - Specify if user is to be added or removed
59 required: false
60 default: present
61 choices: [ 'present', 'absent']
62 '''
63
64 EXAMPLES = """
65 # Set the federation parameter 'local_username' to a value of 'guest' (in quotes)
66 - rabbitmq_parameter: component=federation
67 name=local-username
68 value='"guest"'
69 state=present
70 """
71
72 class RabbitMqParameter(object):
73 def __init__(self, module, component, name, value, vhost, node):
74 self.module = module
75 self.component = component
76 self.name = name
77 self.value = value
78 self.vhost = vhost
79 self.node = node
80
81 self._value = None
82
83 self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True)
84
85 def _exec(self, args, run_in_check_mode=False):
86 if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
87 cmd = [self._rabbitmqctl, '-q', '-n', self.node]
88 rc, out, err = self.module.run_command(cmd + args, check_rc=True)
89 return out.splitlines()
90 return list()
91
92 def get(self):
93 parameters = self._exec(['list_parameters', '-p', self.vhost], True)
94
95 for param_item in parameters:
96 component, name, value = param_item.split('\t')
97
98 if component == self.component and name == self.name:
99 self._value = value
100 return True
101 return False
102
103 def set(self):
104 self._exec(['set_parameter', '-p', self.vhost, self.component, self.name, self.value])
105
106 def delete(self):
107 self._exec(['clear_parameter', '-p', self.vhost, self.component, self.name])
108
109 def has_modifications(self):
110 return self.value != self._value
111
112 def main():
113 arg_spec = dict(
114 component=dict(required=True),
115 name=dict(required=True),
116 value=dict(default=None),
117 vhost=dict(default='/'),
118 state=dict(default='present', choices=['present', 'absent']),
119 node=dict(default='rabbit')
120 )
121 module = AnsibleModule(
122 argument_spec=arg_spec,
123 supports_check_mode=True
124 )
125
126 component = module.params['component']
127 name = module.params['name']
128 value = module.params['value']
129 vhost = module.params['vhost']
130 state = module.params['state']
131 node = module.params['node']
132
133 rabbitmq_parameter = RabbitMqParameter(module, component, name, value, vhost, node)
134
135 changed = False
136 if rabbitmq_parameter.get():
137 if state == 'absent':
138 rabbitmq_parameter.delete()
139 changed = True
140 else:
141 if rabbitmq_parameter.has_modifications():
142 rabbitmq_parameter.set()
143 changed = True
144 elif state == 'present':
145 rabbitmq_parameter.set()
146 changed = True
147
148 module.exit_json(changed=changed, component=component, name=name, vhost=vhost, state=state)
149
150 # import module snippets
151 from ansible.module_utils.basic import *
152 main()
153
[end of messaging/rabbitmq_parameter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/messaging/rabbitmq_parameter.py b/messaging/rabbitmq_parameter.py
--- a/messaging/rabbitmq_parameter.py
+++ b/messaging/rabbitmq_parameter.py
@@ -126,6 +126,8 @@
component = module.params['component']
name = module.params['name']
value = module.params['value']
+ if not isinstance(value, str):
+ value = json.dumps(value)
vhost = module.params['vhost']
state = module.params['state']
node = module.params['node']
|
{"golden_diff": "diff --git a/messaging/rabbitmq_parameter.py b/messaging/rabbitmq_parameter.py\n--- a/messaging/rabbitmq_parameter.py\n+++ b/messaging/rabbitmq_parameter.py\n@@ -126,6 +126,8 @@\n component = module.params['component']\n name = module.params['name']\n value = module.params['value']\n+ if not isinstance(value, str):\n+ value = json.dumps(value)\n vhost = module.params['vhost']\n state = module.params['state']\n node = module.params['node']\n", "issue": "rabbitmq_parameter fails when passing dynamically generated json\n##### Issue Type:\n\n\u201cBug Report\u201d\n##### Ansible Version:\n\nansible 1.8.2\n configured module search path = None\n\nTested also on ansible 1.9\nansible 1.9 (devel 8f06ba2bc1) last updated 2015/02/05 11:16:37 (GMT +200)\n lib/ansible/modules/core: (detached HEAD 600fc15b42) last updated 2015/02/05 11:16:50 (GMT +200)\n lib/ansible/modules/extras: (detached HEAD 77485f6c6a) last updated 2015/02/05 11:17:01 (GMT +200)\n v2/ansible/modules/core: (detached HEAD 600fc15b42) last updated 2015/02/05 11:17:13 (GMT +200)\n v2/ansible/modules/extras: (detached HEAD 77485f6c6a) last updated 2015/02/05 11:17:36 (GMT +200)\n configured module search path = None\n##### Environment:\n\nOS X 10.9.5 > Ubuntu 14.04\n##### Summary:\n\nPassing non-hardcoded json (through templating) to rabbitmq_parameter's \"value\" argument raises an exception. It works when hardcoding the json though.\n##### Steps To Reproduce:\n\nHere's an example playbook with one successful task (the first one), and two failing tasks (the next two) which both raise the same exception (see below). All of them are supposed to do the same thing.\n\n```\n- hosts: my-host\n tasks:\n - set_fact:\n upstream_list:\n - upstream: \"test\"\n - upstream: \"test2\"\n json_upstreams: '[{\"upstream\":\"test1\"},{\"upstream\":\"test2\"}]'\n\n - rabbitmq_parameter:\n component: federation-upstream-set\n name: test-upstream-set\n value: '[{\"upstream\":\"test1\"},{\"upstream\":\"test2\"}]'\n sudo: yes\n\n - rabbitmq_parameter:\n component: federation-upstream-set\n name: test-upstream-set\n value: '{{ json_upstreams }}'\n sudo: yes\n\n - rabbitmq_parameter:\n component: federation-upstream-set\n name: test-upstream-set\n value: '{{ upstream_list|to_json }}'\n sudo: yes\n```\n##### Expected Results:\n\nSet a federation-upstream-set parameter to the default vhost.\n##### Actual Results:\n\nThe two failing tasks raise the same exception. 
The output from ansible with -vvvv is as follows:\n\n```\n<my-host> ESTABLISH CONNECTION FOR USER: xxxx\n<my-host> REMOTE_MODULE rabbitmq_parameter name=test-upstream-set component=federation-upstream-set vhost=/\n<my-host> EXEC ssh -C -vvv -o ForwardAgent=yes -o ControlMaster=auto -o ControlPersist=30m -o ControlPath=\"xxxx/.ansible/cp/ansible-ssh-%h-%p-%r\" -o Port=22 -o KbdInteractiveAuthentication=no -o PreferredAuthentications=gssapi-with-mic,gssapi-k\neyex,hostbased,publickey -o PasswordAuthentication=no -o User=xxxx -o ConnectTimeout=10 my- host /bin/sh -c 'sudo -k && sudo -H -S -p \"[sudo via ansible, key=xxxxxx] password: \" -u root /bin/sh -c '\"'\"'echo SUDO-SUCCESS-xxxxxx; LANG=en_US.UTF-8 LC_CTYPE=en_US.UTF-8 /usr/bin/python'\"'\"''\nfailed: [my-host] => {\"failed\": true, \"parsed\": false}\nSUDO-SUCCESS-xxxxxx\nOpenSSH_6.2p2, OSSLShim 0.9.8r 8 Dec 2011\ndebug1: Reading configuration data /xxxxx/.ssh/config\ndebug1: Reading configuration data /etc/ssh_config\ndebug1: /etc/ssh_config line 20: Applying options for *\ndebug1: /etc/ssh_config line 102: Applying options for *\ndebug1: auto-mux: Trying existing master\ndebug2: fd 3 setting O_NONBLOCK\ndebug2: mux_client_hello_exchange: master version 4\ndebug3: mux_client_forwards: request forwardings: 0 local, 0 remote\ndebug3: mux_client_request_session: entering\ndebug3: mux_client_request_alive: entering\ndebug3: mux_client_request_alive: done pid = 58450\ndebug3: mux_client_request_session: session request sent\ndebug1: mux_client_request_session: master session id: 2\n[sudo via ansible, key=xxxxx] password: Traceback (most recent call last):\n File \"<stdin>\", line 1717, in <module>\n File \"<stdin>\", line 142, in main\n File \"<stdin>\", line 104, in set\n File \"<stdin>\", line 88, in _exec\n File \"<stdin>\", line 1566, in run_command\n File \"/usr/lib/python2.7/posixpath.py\", line 261, in expanduser\n if not path.startswith('~'):\nAttributeError: 'list' object has no attribute 'startswith'\ndebug3: mux_client_read_packet: read header failed: Broken pipe\ndebug2: Received exit status from master 1\n```\n##### Tentative solution:\n\nFrom what I've seen while troubleshooting the issue, the problem comes from the rabbitmq_parameter module being passed a list to its \"value\" argument. It is then passed to AnsibleModule::run_command, which assumes it is a string and passes it through os.path.expanduser.\nI've managed to correct this behavior by setting the value to `value if type(value) == str else json.dumps(value)`.\nI can submit a more complete patch or pull request if this is indeed a bug and not an issue in my playbook.\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2013, Chatham Financial <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: rabbitmq_parameter\nshort_description: Adds or removes parameters to RabbitMQ\ndescription:\n - Manage dynamic, cluster-wide parameters for RabbitMQ\nversion_added: \"1.1\"\nauthor: '\"Chris Hoffman (@chrishoffman)\"'\noptions:\n component:\n description:\n - Name of the component of which the parameter is being set\n required: true\n default: null\n name:\n description:\n - Name of the parameter being set\n required: true\n default: null\n value:\n description:\n - Value of the parameter, as a JSON term\n required: false\n default: null\n vhost:\n description:\n - vhost to apply access privileges.\n required: false\n default: /\n node:\n description:\n - erlang node name of the rabbit we wish to configure\n required: false\n default: rabbit\n version_added: \"1.2\"\n state:\n description:\n - Specify if user is to be added or removed\n required: false\n default: present\n choices: [ 'present', 'absent']\n'''\n\nEXAMPLES = \"\"\"\n# Set the federation parameter 'local_username' to a value of 'guest' (in quotes)\n- rabbitmq_parameter: component=federation\n name=local-username\n value='\"guest\"'\n state=present\n\"\"\"\n\nclass RabbitMqParameter(object):\n def __init__(self, module, component, name, value, vhost, node):\n self.module = module\n self.component = component\n self.name = name\n self.value = value\n self.vhost = vhost\n self.node = node\n\n self._value = None\n\n self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True)\n\n def _exec(self, args, run_in_check_mode=False):\n if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):\n cmd = [self._rabbitmqctl, '-q', '-n', self.node]\n rc, out, err = self.module.run_command(cmd + args, check_rc=True)\n return out.splitlines()\n return list()\n\n def get(self):\n parameters = self._exec(['list_parameters', '-p', self.vhost], True)\n\n for param_item in parameters:\n component, name, value = param_item.split('\\t')\n\n if component == self.component and name == self.name:\n self._value = value\n return True\n return False\n\n def set(self):\n self._exec(['set_parameter', '-p', self.vhost, self.component, self.name, self.value])\n\n def delete(self):\n self._exec(['clear_parameter', '-p', self.vhost, self.component, self.name])\n\n def has_modifications(self):\n return self.value != self._value\n\ndef main():\n arg_spec = dict(\n component=dict(required=True),\n name=dict(required=True),\n value=dict(default=None),\n vhost=dict(default='/'),\n state=dict(default='present', choices=['present', 'absent']),\n node=dict(default='rabbit')\n )\n module = AnsibleModule(\n argument_spec=arg_spec,\n supports_check_mode=True\n )\n\n component = module.params['component']\n name = module.params['name']\n value = module.params['value']\n vhost = module.params['vhost']\n state = module.params['state']\n node = module.params['node']\n\n rabbitmq_parameter = RabbitMqParameter(module, component, name, value, vhost, node)\n\n changed = False\n if rabbitmq_parameter.get():\n if state == 'absent':\n rabbitmq_parameter.delete()\n changed = True\n else:\n if rabbitmq_parameter.has_modifications():\n rabbitmq_parameter.set()\n changed = True\n elif state == 'present':\n rabbitmq_parameter.set()\n changed = True\n\n module.exit_json(changed=changed, component=component, name=name, vhost=vhost, state=state)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nmain()\n", "path": "messaging/rabbitmq_parameter.py"}]}
| 3,341 | 121 |
gh_patches_debug_39198
|
rasdani/github-patches
|
git_diff
|
sktime__sktime-337
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Extend NaiveForecaster
Extend `NaiveForecaster` to include all common naive forecasting strategies. For an overview, see this [chapter](https://otexts.com/fpp2/simple-methods.html#simple-methods).
- [x] introduce `seasonal` as boolean kwarg, refactor "seasonal_last" and implement "seasonal_mean", so that we can set `seasonal=True` and `strategy="mean"` for example
- [x] add "drift" strategy, the forecasts should be similar to those from `ReducedRegressionForecaster(regressor=LinearRegression(), ...)` (a rough sketch of the drift method follows this list)
- [ ] implement more efficient `update_predict` routine
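For reference, the drift forecasts boil down to extrapolating the straight line through the first and last training observations (see the linked chapter). A plain NumPy sketch, not sktime API:

```python
import numpy as np

def drift_forecast(y_train, fh):
    """Drift method: y_T + h * (y_T - y_1) / (T - 1) for each step h in fh."""
    y = np.asarray(y_train, dtype=float)
    slope = (y[-1] - y[0]) / (len(y) - 1)
    return y[-1] + np.asarray(fh) * slope

# drift_forecast([2.0, 4.0, 6.0], fh=[1, 2]) -> array([ 8., 10.])
```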
</issue>
<code>
[start of sktime/forecasting/naive.py]
1 #!/usr/bin/env python3 -u
2 # coding: utf-8
3 # copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
4
5 __all__ = ["NaiveForecaster"]
6 __author__ = ["Markus Löning", "Piyush Gade"]
7
8 from warnings import warn
9
10 import numpy as np
11 from sktime.forecasting.base._base import DEFAULT_ALPHA
12 from sktime.forecasting.base._sktime import BaseLastWindowForecaster
13 from sktime.forecasting.base._sktime import OptionalForecastingHorizonMixin
14 from sktime.utils.validation.forecasting import check_sp
15 from sktime.utils.validation.forecasting import check_window_length
16
17
18 class NaiveForecaster(OptionalForecastingHorizonMixin,
19 BaseLastWindowForecaster):
20 """
21 NaiveForecaster is a forecaster that makes forecasts using simple
22 strategies.
23
24 Parameters
25 ----------
26 strategy : str{"last", "mean"}, optional (default="last")
27 Strategy used to make forecasts:
28
29 * "last" : forecast the last value in the
30 training series when sp is 1.
31 When sp is not 1,
32 last value of each season
33 in the last window will be
34 forecasted for each season.
35 * "mean" : forecast the mean of last window
36 of training series when sp is 1.
37 When sp is not 1, mean of all values
38 in a season from last window will be
39 forecasted for each season.
40
41 sp : int, optional (default=1)
42 Seasonal periodicity to use in the seasonal forecasting.
43
44 window_length : int or None, optional (default=None)
45 Window length to use in the `mean` strategy. If None, entire training
46 series will be used.
47 """
48
49 def __init__(self, strategy="last", window_length=None, sp=1):
50 super(NaiveForecaster, self).__init__()
51 self.strategy = strategy
52 self.sp = sp
53 self.window_length = window_length
54
55 def fit(self, y_train, fh=None, X_train=None):
56 """Fit to training data.
57
58 Parameters
59 ----------
60 y_train : pd.Series
61 Target time series to which to fit the forecaster.
62 fh : int, list or np.array, optional (default=None)
63 The forecasters horizon with the steps ahead to to predict.
64 X_train : pd.DataFrame, optional (default=None)
65 Exogenous variables are ignored
66 Returns
67 -------
68 self : returns an instance of self.
69 """ # X_train is ignored
70 self._set_oh(y_train)
71 self._set_fh(fh)
72
73 if self.strategy == "last":
74 if self.sp == 1:
75 if self.window_length is not None:
76 warn("For the `last` strategy, "
77 "the `window_length` value will be ignored if `sp` "
78 "== 1.")
79 self.window_length_ = 1
80
81 else:
82 self.sp_ = check_sp(self.sp)
83
84 # window length we need for forecasts is just the
85 # length of seasonal periodicity
86 self.window_length_ = self.sp_
87
88 elif self.strategy == "mean":
89 # check window length is greater than sp for seasonal mean
90 if self.window_length is not None and self.sp != 1:
91 if self.window_length < self.sp:
92 raise ValueError(f"The `window_length`: "
93 f"{self.window_length} is smaller than "
94 f"`sp`: {self.sp}.")
95 self.window_length_ = check_window_length(self.window_length)
96 self.sp_ = check_sp(self.sp)
97
98 # if not given, set default window length for the mean strategy
99 if self.window_length is None:
100 self.window_length_ = len(y_train)
101
102 else:
103 allowed_strategies = ("last", "mean")
104 raise ValueError(f"Unknown strategy: {self.strategy}. Expected "
105 f"one of: {allowed_strategies}.")
106
107 # check window length
108 if self.window_length_ > len(self.oh):
109 param = "sp" if self.strategy == "last" and self.sp != 1 \
110 else "window_length_"
111 raise ValueError(
112 f"The {param}: {self.window_length_} is larger than "
113 f"the training series.")
114
115 self._is_fitted = True
116 return self
117
118 def _predict_last_window(self, fh, X=None, return_pred_int=False,
119 alpha=DEFAULT_ALPHA):
120 """Internal predict"""
121 last_window = self._get_last_window()
122
123 # if last window only contains missing values, return nan
124 if np.all(np.isnan(last_window)) or len(last_window) == 0:
125 return self._predict_nan(fh)
126
127 elif self.strategy == "last":
128 if self.sp == 1:
129 return np.repeat(last_window[-1], len(fh))
130
131 else:
132 # we need to replicate the last window if max(fh) is larger
133 # than sp,so that we still make forecasts by repeating the
134 # last value for that season, assume fh is sorted, i.e. max(
135 # fh) == fh[-1]
136 if fh[-1] > self.sp_:
137 reps = np.int(np.ceil(fh[-1] / self.sp_))
138 last_window = np.tile(last_window, reps=reps)
139
140 # get zero-based index by subtracting the minimum
141 fh_idx = fh.index_like(self.cutoff)
142 return last_window[fh_idx]
143
144 elif self.strategy == "mean":
145 if self.sp == 1:
146 return np.repeat(np.nanmean(last_window), len(fh))
147
148 else:
149 # if the window length is not a multiple of sp, we pad the
150 # window with nan values for easy computation of the mean
151 remainder = self.window_length_ % self.sp_
152 if remainder > 0:
153 pad_width = self.sp_ - remainder
154 else:
155 pad_width = 0
156 last_window = np.hstack([last_window,
157 np.full(pad_width, np.nan)])
158
159 # reshape last window, one column per season
160 last_window = last_window.reshape(np.int(np.ceil(
161 self.window_length_ / self.sp_)), self.sp_)
162
163 # compute seasonal mean, averaging over rows
164 y_pred = np.nanmean(last_window, axis=0)
165
166 # we need to replicate the last window if max(fh) is
167 # larger than sp,
168 # so that we still make forecasts by repeating the
169 # last value for that season,
170 # assume fh is sorted, i.e. max(fh) == fh[-1]
171 # only slicing all the last seasons into last_window
172 if fh[-1] > self.sp_:
173 reps = np.int(np.ceil(fh[-1] / self.sp_))
174 y_pred = np.tile(y_pred, reps=reps)
175
176 # get zero-based index by subtracting the minimum
177 fh_idx = fh.index_like(self.cutoff)
178 return y_pred[fh_idx]
179
[end of sktime/forecasting/naive.py]
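For readers tracing the seasonal-mean branch of `_predict_last_window` above, here is a small worked example; the numbers are illustrative assumptions, not taken from the repository.

```python
import numpy as np

# Assume window_length_ = 7 and sp_ = 3, matching the padding/reshape logic above.
last_window = np.array([1., 2., 3., 4., 5., 6., 7.])

remainder = 7 % 3                      # 1
pad_width = 3 - remainder              # pad with 2 NaNs
padded = np.hstack([last_window, np.full(pad_width, np.nan)])

# One column per season: rows are [1 2 3], [4 5 6], [7 nan nan]
seasons = padded.reshape(3, 3)

# Seasonal mean ignores the padding: [4.0, 3.5, 4.5]
y_pred = np.nanmean(seasons, axis=0)
```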
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sktime/forecasting/naive.py b/sktime/forecasting/naive.py
--- a/sktime/forecasting/naive.py
+++ b/sktime/forecasting/naive.py
@@ -23,7 +23,7 @@
Parameters
----------
- strategy : str{"last", "mean"}, optional (default="last")
+ strategy : str{"last", "mean", "drift"}, optional (default="last")
Strategy used to make forecasts:
* "last" : forecast the last value in the
@@ -37,6 +37,9 @@
When sp is not 1, mean of all values
in a season from last window will be
forecasted for each season.
+ * "drift": forecast by fitting a line between the
+ first and last point of the window and
+ extrapolating it into the future
sp : int, optional (default=1)
Seasonal periodicity to use in the seasonal forecasting.
@@ -99,8 +102,22 @@
if self.window_length is None:
self.window_length_ = len(y_train)
+ elif self.strategy == "drift":
+ if self.sp != 1:
+ warn("For the `drift` strategy, "
+ "the `sp` value will be ignored.")
+ # window length we need for forecasts is just the
+ # length of seasonal periodicity
+ self.window_length_ = check_window_length(self.window_length)
+ if self.window_length is None:
+ self.window_length_ = len(y_train)
+ if self.window_length == 1:
+ raise ValueError(f"For the `drift` strategy, "
+ f"the `window_length`: {self.window_length} "
+ f"value must be greater than one.")
+
else:
- allowed_strategies = ("last", "mean")
+ allowed_strategies = ("last", "mean", "drift")
raise ValueError(f"Unknown strategy: {self.strategy}. Expected "
f"one of: {allowed_strategies}.")
@@ -176,3 +193,22 @@
# get zero-based index by subtracting the minimum
fh_idx = fh.index_like(self.cutoff)
return y_pred[fh_idx]
+
+ # if self.strategy == "drift":
+ else:
+ if self.window_length_ != 1:
+ if np.any(np.isnan(last_window[[0, -1]])):
+ raise ValueError(f"For {self.strategy},"
+ f"first and last elements in the last "
+ f"window must not be a missing value.")
+ else:
+ # formula for slope
+ slope = (last_window[-1] -
+ last_window[0]) / (self.window_length_ - 1)
+
+ # get zero-based index by subtracting the minimum
+ fh_idx = fh.index_like(self.cutoff)
+
+ # linear extrapolation
+ y_pred = last_window[-1] + (fh_idx + 1) * slope
+ return y_pred
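As a quick illustration of the `drift` strategy this patch adds, a usage sketch follows; the series, horizon, and fit/predict workflow are assumptions based on the code shown above, not an excerpt from the repository.

```python
import numpy as np
import pandas as pd
from sktime.forecasting.naive import NaiveForecaster

# A linearly increasing series: 1, 2, ..., 20
y_train = pd.Series(np.arange(1.0, 21.0))

forecaster = NaiveForecaster(strategy="drift", window_length=10)
forecaster.fit(y_train)

# slope = (20 - 11) / (10 - 1) = 1.0, so the fitted line is extrapolated forward
y_pred = forecaster.predict(fh=[1, 2, 3])   # approximately [21.0, 22.0, 23.0]
```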
|
{"golden_diff": "diff --git a/sktime/forecasting/naive.py b/sktime/forecasting/naive.py\n--- a/sktime/forecasting/naive.py\n+++ b/sktime/forecasting/naive.py\n@@ -23,7 +23,7 @@\n \n Parameters\n ----------\n- strategy : str{\"last\", \"mean\"}, optional (default=\"last\")\n+ strategy : str{\"last\", \"mean\", \"drift\"}, optional (default=\"last\")\n Strategy used to make forecasts:\n \n * \"last\" : forecast the last value in the\n@@ -37,6 +37,9 @@\n When sp is not 1, mean of all values\n in a season from last window will be\n forecasted for each season.\n+ * \"drift\": forecast by fitting a line between the\n+ first and last point of the window and\n+ extrapolating it into the future\n \n sp : int, optional (default=1)\n Seasonal periodicity to use in the seasonal forecasting.\n@@ -99,8 +102,22 @@\n if self.window_length is None:\n self.window_length_ = len(y_train)\n \n+ elif self.strategy == \"drift\":\n+ if self.sp != 1:\n+ warn(\"For the `drift` strategy, \"\n+ \"the `sp` value will be ignored.\")\n+ # window length we need for forecasts is just the\n+ # length of seasonal periodicity\n+ self.window_length_ = check_window_length(self.window_length)\n+ if self.window_length is None:\n+ self.window_length_ = len(y_train)\n+ if self.window_length == 1:\n+ raise ValueError(f\"For the `drift` strategy, \"\n+ f\"the `window_length`: {self.window_length} \"\n+ f\"value must be greater than one.\")\n+\n else:\n- allowed_strategies = (\"last\", \"mean\")\n+ allowed_strategies = (\"last\", \"mean\", \"drift\")\n raise ValueError(f\"Unknown strategy: {self.strategy}. Expected \"\n f\"one of: {allowed_strategies}.\")\n \n@@ -176,3 +193,22 @@\n # get zero-based index by subtracting the minimum\n fh_idx = fh.index_like(self.cutoff)\n return y_pred[fh_idx]\n+\n+ # if self.strategy == \"drift\":\n+ else:\n+ if self.window_length_ != 1:\n+ if np.any(np.isnan(last_window[[0, -1]])):\n+ raise ValueError(f\"For {self.strategy},\"\n+ f\"first and last elements in the last \"\n+ f\"window must not be a missing value.\")\n+ else:\n+ # formula for slope\n+ slope = (last_window[-1] -\n+ last_window[0]) / (self.window_length_ - 1)\n+\n+ # get zero-based index by subtracting the minimum\n+ fh_idx = fh.index_like(self.cutoff)\n+\n+ # linear extrapolation\n+ y_pred = last_window[-1] + (fh_idx + 1) * slope\n+ return y_pred\n", "issue": "Extend NaiveForecaster\nExtend `NaiveForecaster` to include all common naive forecasting strategies. 
For an overview, see this [chapter](https://otexts.com/fpp2/simple-methods.html#simple-methods).\r\n\r\n- [x] introduce `seasonal` as boolean kwarg, refactor \"seasonal_last\" and implement \"seasonal_mean\", so that we can set `seasonal=True` and `strategy=\"mean\"` for example\r\n- [x] add \"drift\" strategy, the forecasts should be similar to the forecasts from `ReducedRegressionForecaster(regressor=LinearRegression(), ...)`\r\n- [ ] implement more efficient `update_predict` routine\n", "before_files": [{"content": "#!/usr/bin/env python3 -u\n# coding: utf-8\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\n__all__ = [\"NaiveForecaster\"]\n__author__ = [\"Markus L\u00f6ning\", \"Piyush Gade\"]\n\nfrom warnings import warn\n\nimport numpy as np\nfrom sktime.forecasting.base._base import DEFAULT_ALPHA\nfrom sktime.forecasting.base._sktime import BaseLastWindowForecaster\nfrom sktime.forecasting.base._sktime import OptionalForecastingHorizonMixin\nfrom sktime.utils.validation.forecasting import check_sp\nfrom sktime.utils.validation.forecasting import check_window_length\n\n\nclass NaiveForecaster(OptionalForecastingHorizonMixin,\n BaseLastWindowForecaster):\n \"\"\"\n NaiveForecaster is a forecaster that makes forecasts using simple\n strategies.\n\n Parameters\n ----------\n strategy : str{\"last\", \"mean\"}, optional (default=\"last\")\n Strategy used to make forecasts:\n\n * \"last\" : forecast the last value in the\n training series when sp is 1.\n When sp is not 1,\n last value of each season\n in the last window will be\n forecasted for each season.\n * \"mean\" : forecast the mean of last window\n of training series when sp is 1.\n When sp is not 1, mean of all values\n in a season from last window will be\n forecasted for each season.\n\n sp : int, optional (default=1)\n Seasonal periodicity to use in the seasonal forecasting.\n\n window_length : int or None, optional (default=None)\n Window length to use in the `mean` strategy. 
If None, entire training\n series will be used.\n \"\"\"\n\n def __init__(self, strategy=\"last\", window_length=None, sp=1):\n super(NaiveForecaster, self).__init__()\n self.strategy = strategy\n self.sp = sp\n self.window_length = window_length\n\n def fit(self, y_train, fh=None, X_train=None):\n \"\"\"Fit to training data.\n\n Parameters\n ----------\n y_train : pd.Series\n Target time series to which to fit the forecaster.\n fh : int, list or np.array, optional (default=None)\n The forecasters horizon with the steps ahead to to predict.\n X_train : pd.DataFrame, optional (default=None)\n Exogenous variables are ignored\n Returns\n -------\n self : returns an instance of self.\n \"\"\" # X_train is ignored\n self._set_oh(y_train)\n self._set_fh(fh)\n\n if self.strategy == \"last\":\n if self.sp == 1:\n if self.window_length is not None:\n warn(\"For the `last` strategy, \"\n \"the `window_length` value will be ignored if `sp` \"\n \"== 1.\")\n self.window_length_ = 1\n\n else:\n self.sp_ = check_sp(self.sp)\n\n # window length we need for forecasts is just the\n # length of seasonal periodicity\n self.window_length_ = self.sp_\n\n elif self.strategy == \"mean\":\n # check window length is greater than sp for seasonal mean\n if self.window_length is not None and self.sp != 1:\n if self.window_length < self.sp:\n raise ValueError(f\"The `window_length`: \"\n f\"{self.window_length} is smaller than \"\n f\"`sp`: {self.sp}.\")\n self.window_length_ = check_window_length(self.window_length)\n self.sp_ = check_sp(self.sp)\n\n # if not given, set default window length for the mean strategy\n if self.window_length is None:\n self.window_length_ = len(y_train)\n\n else:\n allowed_strategies = (\"last\", \"mean\")\n raise ValueError(f\"Unknown strategy: {self.strategy}. Expected \"\n f\"one of: {allowed_strategies}.\")\n\n # check window length\n if self.window_length_ > len(self.oh):\n param = \"sp\" if self.strategy == \"last\" and self.sp != 1 \\\n else \"window_length_\"\n raise ValueError(\n f\"The {param}: {self.window_length_} is larger than \"\n f\"the training series.\")\n\n self._is_fitted = True\n return self\n\n def _predict_last_window(self, fh, X=None, return_pred_int=False,\n alpha=DEFAULT_ALPHA):\n \"\"\"Internal predict\"\"\"\n last_window = self._get_last_window()\n\n # if last window only contains missing values, return nan\n if np.all(np.isnan(last_window)) or len(last_window) == 0:\n return self._predict_nan(fh)\n\n elif self.strategy == \"last\":\n if self.sp == 1:\n return np.repeat(last_window[-1], len(fh))\n\n else:\n # we need to replicate the last window if max(fh) is larger\n # than sp,so that we still make forecasts by repeating the\n # last value for that season, assume fh is sorted, i.e. 
max(\n # fh) == fh[-1]\n if fh[-1] > self.sp_:\n reps = np.int(np.ceil(fh[-1] / self.sp_))\n last_window = np.tile(last_window, reps=reps)\n\n # get zero-based index by subtracting the minimum\n fh_idx = fh.index_like(self.cutoff)\n return last_window[fh_idx]\n\n elif self.strategy == \"mean\":\n if self.sp == 1:\n return np.repeat(np.nanmean(last_window), len(fh))\n\n else:\n # if the window length is not a multiple of sp, we pad the\n # window with nan values for easy computation of the mean\n remainder = self.window_length_ % self.sp_\n if remainder > 0:\n pad_width = self.sp_ - remainder\n else:\n pad_width = 0\n last_window = np.hstack([last_window,\n np.full(pad_width, np.nan)])\n\n # reshape last window, one column per season\n last_window = last_window.reshape(np.int(np.ceil(\n self.window_length_ / self.sp_)), self.sp_)\n\n # compute seasonal mean, averaging over rows\n y_pred = np.nanmean(last_window, axis=0)\n\n # we need to replicate the last window if max(fh) is\n # larger than sp,\n # so that we still make forecasts by repeating the\n # last value for that season,\n # assume fh is sorted, i.e. max(fh) == fh[-1]\n # only slicing all the last seasons into last_window\n if fh[-1] > self.sp_:\n reps = np.int(np.ceil(fh[-1] / self.sp_))\n y_pred = np.tile(y_pred, reps=reps)\n\n # get zero-based index by subtracting the minimum\n fh_idx = fh.index_like(self.cutoff)\n return y_pred[fh_idx]\n", "path": "sktime/forecasting/naive.py"}]}
| 2,627 | 694 |
gh_patches_debug_39491
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-3403
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
IATI export error when result has no quantitative indicators
When running the IATI export, a result that has only qualitative indicators is reported as an error.
This seems to be a bug introduced when we started including qualitative indicators in the export, but didn't change the validation to allow for qualitative-only results.
</issue>
<code>
[start of akvo/iati/checks/fields/results.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from akvo.rsr.models.result.utils import QUANTITATIVE
8
9 DGIS_VALIDATION_SET_NAME = u"DGIS IATI"
10
11
12 def results(project):
13 """
14 :param project: Project object
15 :return: All checks passed boolean, [Check results]
16 """
17 checks = []
18 all_checks_passed = True
19
20 DGIS_PROJECT = project.validations.filter(name=DGIS_VALIDATION_SET_NAME).count() == 1
21
22 for result in project.results.all():
23 if not result.type:
24 all_checks_passed = False
25 checks.append((u'error', u'result (id: %s) has no type specified' % str(result.pk)))
26
27 if not result.title:
28 all_checks_passed = False
29 checks.append((u'error', u'result (id: %s) has no title specified' % str(result.pk)))
30
31 if not result.indicators.filter(type=QUANTITATIVE):
32 all_checks_passed = False
33 checks.append(
34 (u'error', u'result (id: %s) has no quantitative indicator(s)' % str(result.pk))
35 )
36
37 for indicator in result.indicators.filter(type=QUANTITATIVE):
38 if not indicator.measure:
39 all_checks_passed = False
40 checks.append((u'error', u'indicator (id: %s) has no measure specified' %
41 str(indicator.pk)))
42
43 if not indicator.title:
44 all_checks_passed = False
45 checks.append((u'error', u'indicator (id: %s) has no title specified' %
46 str(indicator.pk)))
47
48 if not indicator.baseline_value:
49 if DGIS_PROJECT:
50 all_checks_passed = False
51 checks.append((u'warning', u'indicator (id: %s) baseline has no value '
52 u'specified, however the value of "N/A" has been '
53 u'set for the attribute' % str(indicator.pk)))
54 elif indicator.baseline_year or indicator.baseline_comment:
55 all_checks_passed = False
56 checks.append((u'error', u'indicator (id: %s) baseline has no value specified' %
57 str(indicator.pk)))
58
59 if not indicator.baseline_year:
60 if DGIS_PROJECT:
61 all_checks_passed = False
62 checks.append((u'warning', u'indicator (id: %s) baseline has no year '
63 u'specified, however the value of "1" has been '
64 u'set for the attribute' % str(indicator.pk)))
65 elif indicator.baseline_value or indicator.baseline_comment:
66 all_checks_passed = False
67 checks.append((u'error', u'indicator (id: %s) baseline has no year specified' %
68 str(indicator.pk)))
69
70 for reference in indicator.references.all():
71 if not reference.reference:
72 all_checks_passed = False
73 checks.append((u'error', u'indicator reference (id: %s) has no code '
74 u'specified' % str(reference.pk)))
75
76 if not reference.vocabulary:
77 all_checks_passed = False
78 checks.append((u'error', u'indicator reference (id: %s) has no vocabulary '
79 u'specified' % str(reference.pk)))
80
81 if reference.vocabulary == '99' and not reference.vocabulary_uri:
82 all_checks_passed = False
83 checks.append((u'error', u'indicator reference (id: %s) has vocabulary 99 '
84 u'(reporting organisation) but no vocabulary URI '
85 u'specified' % str(reference.pk)))
86
87 for period in indicator.periods.all():
88 if not period.period_start:
89 all_checks_passed = False
90 checks.append((u'error', u'indicator period (id: %s) has no start date '
91 u'specified' % str(period.pk)))
92
93 if not period.period_end:
94 all_checks_passed = False
95 checks.append((u'error', u'indicator period (id: %s) has no end date '
96 u'specified' % str(period.pk)))
97
98 if period.period_start and period.period_end and \
99 period.period_start > period.period_end:
100 all_checks_passed = False
101 checks.append((u'error', u'indicator period (id: %s) has a start date '
102 u'later than the end date' % str(period.pk)))
103
104 if not period.target_value:
105 if DGIS_PROJECT:
106 all_checks_passed = False
107 checks.append((u'warning', u'indicator period (id: %s) has no target value '
108 u'specified. The value "N/A" has been set for '
109 u'the target value attribute' % str(period.pk)))
110 elif (period.target_comment or period.target_locations.all() or
111 period.target_dimensions.all()):
112 all_checks_passed = False
113 checks.append((u'error', u'indicator period (id: %s) has no target value, '
114 u'but does have a target comment, target '
115 u'location(s) or target dimension(s)' %
116 str(period.pk)))
117
118 if not period.actual_value:
119 if DGIS_PROJECT:
120 all_checks_passed = False
121 checks.append((u'warning', u'indicator period (id: %s) has no actual value '
122 u'specified. The value "N/A" has been set for '
123 u'the actual value attribute' % str(period.pk)))
124 elif (period.actual_comment or period.actual_locations.all() or
125 period.actual_dimensions.all()):
126 all_checks_passed = False
127 checks.append((u'error', u'indicator period (id: %s) has no actual value, '
128 u'but does have a actual comment, actual '
129 u'location(s) or actual dimension(s)' %
130 str(period.pk)))
131
132 if project.results.all() and all_checks_passed:
133 checks.append((u'success', u'has valid result(s)'))
134
135 return all_checks_passed, checks
136
[end of akvo/iati/checks/fields/results.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/akvo/iati/checks/fields/results.py b/akvo/iati/checks/fields/results.py
--- a/akvo/iati/checks/fields/results.py
+++ b/akvo/iati/checks/fields/results.py
@@ -4,7 +4,7 @@
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
-from akvo.rsr.models.result.utils import QUANTITATIVE
+from akvo.rsr.models.result.utils import QUANTITATIVE, QUALITATIVE
DGIS_VALIDATION_SET_NAME = u"DGIS IATI"
@@ -28,14 +28,14 @@
all_checks_passed = False
checks.append((u'error', u'result (id: %s) has no title specified' % str(result.pk)))
- if not result.indicators.filter(type=QUANTITATIVE):
+ if not result.indicators.exists():
all_checks_passed = False
checks.append(
- (u'error', u'result (id: %s) has no quantitative indicator(s)' % str(result.pk))
+ (u'error', u'result (id: %s) has no indicator(s)' % str(result.pk))
)
- for indicator in result.indicators.filter(type=QUANTITATIVE):
- if not indicator.measure:
+ for indicator in result.indicators.all():
+ if indicator.type == QUANTITATIVE and not indicator.measure:
all_checks_passed = False
checks.append((u'error', u'indicator (id: %s) has no measure specified' %
str(indicator.pk)))
@@ -101,7 +101,7 @@
checks.append((u'error', u'indicator period (id: %s) has a start date '
u'later than the end date' % str(period.pk)))
- if not period.target_value:
+ if indicator.type == QUANTITATIVE and not period.target_value:
if DGIS_PROJECT:
all_checks_passed = False
checks.append((u'warning', u'indicator period (id: %s) has no target value '
@@ -115,7 +115,7 @@
u'location(s) or target dimension(s)' %
str(period.pk)))
- if not period.actual_value:
+ if indicator.type == QUANTITATIVE and not period.actual_value:
if DGIS_PROJECT:
all_checks_passed = False
checks.append((u'warning', u'indicator period (id: %s) has no actual value '
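To make the behavioural change concrete, here is a standalone sketch of the patched per-indicator logic; the enum values and the tiny stand-in class are assumptions, not code from the repository.

```python
QUANTITATIVE, QUALITATIVE = 1, 2            # assumed enum values

class Indicator:
    def __init__(self, type, measure=None):
        self.type = type
        self.measure = measure

indicators = [Indicator(QUALITATIVE)]       # a result with only qualitative indicators

errors = []
if not indicators:                          # only a result with no indicators at all errors out
    errors.append("result has no indicator(s)")
for indicator in indicators:
    if indicator.type == QUANTITATIVE and not indicator.measure:
        errors.append("indicator has no measure specified")

assert errors == []                         # qualitative-only results now pass
```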
|
{"golden_diff": "diff --git a/akvo/iati/checks/fields/results.py b/akvo/iati/checks/fields/results.py\n--- a/akvo/iati/checks/fields/results.py\n+++ b/akvo/iati/checks/fields/results.py\n@@ -4,7 +4,7 @@\n # See more details in the license.txt file located at the root folder of the Akvo RSR module.\n # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n \n-from akvo.rsr.models.result.utils import QUANTITATIVE\n+from akvo.rsr.models.result.utils import QUANTITATIVE, QUALITATIVE\n \n DGIS_VALIDATION_SET_NAME = u\"DGIS IATI\"\n \n@@ -28,14 +28,14 @@\n all_checks_passed = False\n checks.append((u'error', u'result (id: %s) has no title specified' % str(result.pk)))\n \n- if not result.indicators.filter(type=QUANTITATIVE):\n+ if not result.indicators.exists():\n all_checks_passed = False\n checks.append(\n- (u'error', u'result (id: %s) has no quantitative indicator(s)' % str(result.pk))\n+ (u'error', u'result (id: %s) has no indicator(s)' % str(result.pk))\n )\n \n- for indicator in result.indicators.filter(type=QUANTITATIVE):\n- if not indicator.measure:\n+ for indicator in result.indicators.all():\n+ if indicator.type == QUANTITATIVE and not indicator.measure:\n all_checks_passed = False\n checks.append((u'error', u'indicator (id: %s) has no measure specified' %\n str(indicator.pk)))\n@@ -101,7 +101,7 @@\n checks.append((u'error', u'indicator period (id: %s) has a start date '\n u'later than the end date' % str(period.pk)))\n \n- if not period.target_value:\n+ if indicator.type == QUANTITATIVE and not period.target_value:\n if DGIS_PROJECT:\n all_checks_passed = False\n checks.append((u'warning', u'indicator period (id: %s) has no target value '\n@@ -115,7 +115,7 @@\n u'location(s) or target dimension(s)' %\n str(period.pk)))\n \n- if not period.actual_value:\n+ if indicator.type == QUANTITATIVE and not period.actual_value:\n if DGIS_PROJECT:\n all_checks_passed = False\n checks.append((u'warning', u'indicator period (id: %s) has no actual value '\n", "issue": "IATI export error when result has no quantitative indicators\nWhen running the IATI export, a result that has only qualitative indicators is reported as an error.\r\n\r\nThis seems to be a bug introduced when we started including qualitative indicators in the export, but didn't change the validation to allow for qualitative-only results.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom akvo.rsr.models.result.utils import QUANTITATIVE\n\nDGIS_VALIDATION_SET_NAME = u\"DGIS IATI\"\n\n\ndef results(project):\n \"\"\"\n :param project: Project object\n :return: All checks passed boolean, [Check results]\n \"\"\"\n checks = []\n all_checks_passed = True\n\n DGIS_PROJECT = project.validations.filter(name=DGIS_VALIDATION_SET_NAME).count() == 1\n\n for result in project.results.all():\n if not result.type:\n all_checks_passed = False\n checks.append((u'error', u'result (id: %s) has no type specified' % str(result.pk)))\n\n if not result.title:\n all_checks_passed = False\n checks.append((u'error', u'result (id: %s) has no title specified' % str(result.pk)))\n\n if not result.indicators.filter(type=QUANTITATIVE):\n all_checks_passed = False\n checks.append(\n (u'error', u'result (id: %s) has no quantitative indicator(s)' % 
str(result.pk))\n )\n\n for indicator in result.indicators.filter(type=QUANTITATIVE):\n if not indicator.measure:\n all_checks_passed = False\n checks.append((u'error', u'indicator (id: %s) has no measure specified' %\n str(indicator.pk)))\n\n if not indicator.title:\n all_checks_passed = False\n checks.append((u'error', u'indicator (id: %s) has no title specified' %\n str(indicator.pk)))\n\n if not indicator.baseline_value:\n if DGIS_PROJECT:\n all_checks_passed = False\n checks.append((u'warning', u'indicator (id: %s) baseline has no value '\n u'specified, however the value of \"N/A\" has been '\n u'set for the attribute' % str(indicator.pk)))\n elif indicator.baseline_year or indicator.baseline_comment:\n all_checks_passed = False\n checks.append((u'error', u'indicator (id: %s) baseline has no value specified' %\n str(indicator.pk)))\n\n if not indicator.baseline_year:\n if DGIS_PROJECT:\n all_checks_passed = False\n checks.append((u'warning', u'indicator (id: %s) baseline has no year '\n u'specified, however the value of \"1\" has been '\n u'set for the attribute' % str(indicator.pk)))\n elif indicator.baseline_value or indicator.baseline_comment:\n all_checks_passed = False\n checks.append((u'error', u'indicator (id: %s) baseline has no year specified' %\n str(indicator.pk)))\n\n for reference in indicator.references.all():\n if not reference.reference:\n all_checks_passed = False\n checks.append((u'error', u'indicator reference (id: %s) has no code '\n u'specified' % str(reference.pk)))\n\n if not reference.vocabulary:\n all_checks_passed = False\n checks.append((u'error', u'indicator reference (id: %s) has no vocabulary '\n u'specified' % str(reference.pk)))\n\n if reference.vocabulary == '99' and not reference.vocabulary_uri:\n all_checks_passed = False\n checks.append((u'error', u'indicator reference (id: %s) has vocabulary 99 '\n u'(reporting organisation) but no vocabulary URI '\n u'specified' % str(reference.pk)))\n\n for period in indicator.periods.all():\n if not period.period_start:\n all_checks_passed = False\n checks.append((u'error', u'indicator period (id: %s) has no start date '\n u'specified' % str(period.pk)))\n\n if not period.period_end:\n all_checks_passed = False\n checks.append((u'error', u'indicator period (id: %s) has no end date '\n u'specified' % str(period.pk)))\n\n if period.period_start and period.period_end and \\\n period.period_start > period.period_end:\n all_checks_passed = False\n checks.append((u'error', u'indicator period (id: %s) has a start date '\n u'later than the end date' % str(period.pk)))\n\n if not period.target_value:\n if DGIS_PROJECT:\n all_checks_passed = False\n checks.append((u'warning', u'indicator period (id: %s) has no target value '\n u'specified. The value \"N/A\" has been set for '\n u'the target value attribute' % str(period.pk)))\n elif (period.target_comment or period.target_locations.all() or\n period.target_dimensions.all()):\n all_checks_passed = False\n checks.append((u'error', u'indicator period (id: %s) has no target value, '\n u'but does have a target comment, target '\n u'location(s) or target dimension(s)' %\n str(period.pk)))\n\n if not period.actual_value:\n if DGIS_PROJECT:\n all_checks_passed = False\n checks.append((u'warning', u'indicator period (id: %s) has no actual value '\n u'specified. 
The value \"N/A\" has been set for '\n u'the actual value attribute' % str(period.pk)))\n elif (period.actual_comment or period.actual_locations.all() or\n period.actual_dimensions.all()):\n all_checks_passed = False\n checks.append((u'error', u'indicator period (id: %s) has no actual value, '\n u'but does have a actual comment, actual '\n u'location(s) or actual dimension(s)' %\n str(period.pk)))\n\n if project.results.all() and all_checks_passed:\n checks.append((u'success', u'has valid result(s)'))\n\n return all_checks_passed, checks\n", "path": "akvo/iati/checks/fields/results.py"}]}
| 2,251 | 591 |
gh_patches_debug_14027
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-451
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove all paragraph content from the viewmodel in `core:index`
From https://github.com/cal-itp/benefits/issues/366
<img width="832" alt="image" src="https://user-images.githubusercontent.com/3673236/163299119-7b8c7696-05b5-4ade-9bc2-c5b784707a66.png">
</issue>
<code>
[start of benefits/core/views.py]
1 """
2 The core application: view definition for the root of the webapp.
3 """
4 from django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError
5 from django.template import loader
6 from django.template.response import TemplateResponse
7 from django.urls import reverse
8 from django.utils.translation import gettext as _
9
10 from . import middleware, models, session, viewmodels
11
12
13 def PageTemplateResponse(request, page_vm):
14 """Helper returns a TemplateResponse using the common page template."""
15 return TemplateResponse(request, "core/page.html", page_vm.context_dict())
16
17
18 def _index_content_title():
19 """Helper returns the content title for the common index page."""
20 return _("core.pages.index.content_title")
21
22
23 def _index_paragraphs():
24 """Helper returns the content paragraphs for the common index page."""
25 return [_("core.pages.index.p[0]"), _("core.pages.index.p[1]"), _("core.pages.index.p[2]")]
26
27
28 def _index_url():
29 """Helper computes the index url path."""
30 return reverse("core:index")
31
32
33 @middleware.pageview_decorator
34 def index(request):
35 """View handler for the main entry page."""
36 session.reset(request)
37
38 # generate a button to the landing page for each active agency
39 agencies = models.TransitAgency.all_active()
40 buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]
41 buttons[0].classes.append("mt-3")
42 buttons[0].label = _("core.pages.index.chooseprovider")
43
44 page = viewmodels.Page(
45 content_title=_index_content_title(),
46 paragraphs=_index_paragraphs(),
47 buttons=buttons,
48 classes="home",
49 )
50
51 return PageTemplateResponse(request, page)
52
53
54 @middleware.pageview_decorator
55 def agency_index(request, agency):
56 """View handler for an agency entry page."""
57 session.reset(request)
58 session.update(request, agency=agency, origin=agency.index_url)
59
60 button = viewmodels.Button.primary(text=_("core.pages.index.continue"), url=reverse("eligibility:index"))
61 button.label = _("core.pages.agency_index.button.label")
62
63 page = viewmodels.Page(
64 content_title=_("core.pages.agency_index.content_title"),
65 button=button,
66 classes="home",
67 )
68
69 help_page = reverse("core:help")
70 context_dict = {**page.context_dict(), **{"info_link": f"{help_page}#about"}}
71
72 return TemplateResponse(request, "core/agency_index.html", context_dict)
73
74
75 @middleware.pageview_decorator
76 def help(request):
77 """View handler for the help page."""
78 if session.active_agency(request):
79 agency = session.agency(request)
80 buttons = viewmodels.Button.agency_contact_links(agency)
81 else:
82 buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]
83
84 buttons.append(viewmodels.Button.home(request, _("core.buttons.back")))
85
86 page = viewmodels.Page(
87 title=_("core.buttons.help"),
88 content_title=_("core.buttons.help"),
89 buttons=buttons,
90 classes="text-lg-center",
91 noimage=True,
92 )
93
94 return TemplateResponse(request, "core/help.html", page.context_dict())
95
96
97 @middleware.pageview_decorator
98 def bad_request(request, exception, template_name="400.html"):
99 """View handler for HTTP 400 Bad Request responses."""
100 if session.active_agency(request):
101 session.update(request, origin=session.agency(request).index_url)
102 else:
103 session.update(request, origin=_index_url())
104
105 home = viewmodels.Button.home(request)
106 page = viewmodels.ErrorPage.error(button=home)
107 t = loader.get_template(template_name)
108
109 return HttpResponseBadRequest(t.render(page.context_dict()))
110
111
112 @middleware.pageview_decorator
113 def csrf_failure(request, reason):
114 """
115 View handler for CSRF_FAILURE_VIEW with custom data.
116 """
117 if session.active_agency(request):
118 session.update(request, origin=session.agency(request).index_url)
119 else:
120 session.update(request, origin=_index_url())
121
122 home = viewmodels.Button.home(request)
123 page = viewmodels.ErrorPage.not_found(button=home, path=request.path)
124 t = loader.get_template("400.html")
125
126 return HttpResponseNotFound(t.render(page.context_dict()))
127
128
129 @middleware.pageview_decorator
130 def page_not_found(request, exception, template_name="404.html"):
131 """View handler for HTTP 404 Not Found responses."""
132 if session.active_agency(request):
133 session.update(request, origin=session.agency(request).index_url)
134 else:
135 session.update(request, origin=_index_url())
136
137 home = viewmodels.Button.home(request)
138 page = viewmodels.ErrorPage.not_found(button=home, path=request.path)
139 t = loader.get_template(template_name)
140
141 return HttpResponseNotFound(t.render(page.context_dict()))
142
143
144 @middleware.pageview_decorator
145 def server_error(request, template_name="500.html"):
146 """View handler for HTTP 500 Server Error responses."""
147 if session.active_agency(request):
148 session.update(request, origin=session.agency(request).index_url)
149 else:
150 session.update(request, origin=_index_url())
151
152 home = viewmodels.Button.home(request)
153 page = viewmodels.ErrorPage.error(button=home)
154 t = loader.get_template(template_name)
155
156 return HttpResponseServerError(t.render(page.context_dict()))
157
[end of benefits/core/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/benefits/core/views.py b/benefits/core/views.py
--- a/benefits/core/views.py
+++ b/benefits/core/views.py
@@ -20,11 +20,6 @@
return _("core.pages.index.content_title")
-def _index_paragraphs():
- """Helper returns the content paragraphs for the common index page."""
- return [_("core.pages.index.p[0]"), _("core.pages.index.p[1]"), _("core.pages.index.p[2]")]
-
-
def _index_url():
"""Helper computes the index url path."""
return reverse("core:index")
@@ -43,7 +38,6 @@
page = viewmodels.Page(
content_title=_index_content_title(),
- paragraphs=_index_paragraphs(),
buttons=buttons,
classes="home",
)
|
{"golden_diff": "diff --git a/benefits/core/views.py b/benefits/core/views.py\n--- a/benefits/core/views.py\n+++ b/benefits/core/views.py\n@@ -20,11 +20,6 @@\n return _(\"core.pages.index.content_title\")\n \n \n-def _index_paragraphs():\n- \"\"\"Helper returns the content paragraphs for the common index page.\"\"\"\n- return [_(\"core.pages.index.p[0]\"), _(\"core.pages.index.p[1]\"), _(\"core.pages.index.p[2]\")]\n-\n-\n def _index_url():\n \"\"\"Helper computes the index url path.\"\"\"\n return reverse(\"core:index\")\n@@ -43,7 +38,6 @@\n \n page = viewmodels.Page(\n content_title=_index_content_title(),\n- paragraphs=_index_paragraphs(),\n buttons=buttons,\n classes=\"home\",\n )\n", "issue": "Remove all paragraph content from the viewmodel in `core:index`\nFrom https://github.com/cal-itp/benefits/issues/366\r\n<img width=\"832\" alt=\"image\" src=\"https://user-images.githubusercontent.com/3673236/163299119-7b8c7696-05b5-4ade-9bc2-c5b784707a66.png\">\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nThe core application: view definition for the root of the webapp.\n\"\"\"\nfrom django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\nfrom django.template import loader\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\n\nfrom . import middleware, models, session, viewmodels\n\n\ndef PageTemplateResponse(request, page_vm):\n \"\"\"Helper returns a TemplateResponse using the common page template.\"\"\"\n return TemplateResponse(request, \"core/page.html\", page_vm.context_dict())\n\n\ndef _index_content_title():\n \"\"\"Helper returns the content title for the common index page.\"\"\"\n return _(\"core.pages.index.content_title\")\n\n\ndef _index_paragraphs():\n \"\"\"Helper returns the content paragraphs for the common index page.\"\"\"\n return [_(\"core.pages.index.p[0]\"), _(\"core.pages.index.p[1]\"), _(\"core.pages.index.p[2]\")]\n\n\ndef _index_url():\n \"\"\"Helper computes the index url path.\"\"\"\n return reverse(\"core:index\")\n\n\[email protected]_decorator\ndef index(request):\n \"\"\"View handler for the main entry page.\"\"\"\n session.reset(request)\n\n # generate a button to the landing page for each active agency\n agencies = models.TransitAgency.all_active()\n buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]\n buttons[0].classes.append(\"mt-3\")\n buttons[0].label = _(\"core.pages.index.chooseprovider\")\n\n page = viewmodels.Page(\n content_title=_index_content_title(),\n paragraphs=_index_paragraphs(),\n buttons=buttons,\n classes=\"home\",\n )\n\n return PageTemplateResponse(request, page)\n\n\[email protected]_decorator\ndef agency_index(request, agency):\n \"\"\"View handler for an agency entry page.\"\"\"\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n\n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(\"eligibility:index\"))\n button.label = _(\"core.pages.agency_index.button.label\")\n\n page = viewmodels.Page(\n content_title=_(\"core.pages.agency_index.content_title\"),\n button=button,\n classes=\"home\",\n )\n\n help_page = reverse(\"core:help\")\n context_dict = {**page.context_dict(), **{\"info_link\": f\"{help_page}#about\"}}\n\n return TemplateResponse(request, \"core/agency_index.html\", context_dict)\n\n\[email protected]_decorator\ndef help(request):\n \"\"\"View handler for the help page.\"\"\"\n 
if session.active_agency(request):\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n else:\n buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]\n\n buttons.append(viewmodels.Button.home(request, _(\"core.buttons.back\")))\n\n page = viewmodels.Page(\n title=_(\"core.buttons.help\"),\n content_title=_(\"core.buttons.help\"),\n buttons=buttons,\n classes=\"text-lg-center\",\n noimage=True,\n )\n\n return TemplateResponse(request, \"core/help.html\", page.context_dict())\n\n\[email protected]_decorator\ndef bad_request(request, exception, template_name=\"400.html\"):\n \"\"\"View handler for HTTP 400 Bad Request responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseBadRequest(t.render(page.context_dict()))\n\n\[email protected]_decorator\ndef csrf_failure(request, reason):\n \"\"\"\n View handler for CSRF_FAILURE_VIEW with custom data.\n \"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(\"400.html\")\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\[email protected]_decorator\ndef page_not_found(request, exception, template_name=\"404.html\"):\n \"\"\"View handler for HTTP 404 Not Found responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(template_name)\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\[email protected]_decorator\ndef server_error(request, template_name=\"500.html\"):\n \"\"\"View handler for HTTP 500 Server Error responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseServerError(t.render(page.context_dict()))\n", "path": "benefits/core/views.py"}]}
| 2,140 | 180 |
gh_patches_debug_33025
|
rasdani/github-patches
|
git_diff
|
pypi__warehouse-2473
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Admin package upload size text control should use MB instead of bytes
Context: https://github.com/pypa/warehouse/pull/2470#issuecomment-334852617
Decision:
> The input in should be in MB and should just be converted to/from bytes in the backend.
Also:
> It also might be a good idea to flash an error and redirect back to the detail page if you try to set a limit less then the current default minimum.
Additionally:
> [The form field] would be a bit nicer as <input type="number" min={{ THE MINMUM SIZE }} ...> instead of as a text field. It might also make sense to include step="10" or something
</issue>
<code>
[start of warehouse/admin/views/projects.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import shlex
14
15 from paginate_sqlalchemy import SqlalchemyOrmPage as SQLAlchemyORMPage
16 from pyramid.httpexceptions import (
17 HTTPBadRequest,
18 HTTPMovedPermanently,
19 HTTPSeeOther,
20 )
21 from pyramid.view import view_config
22 from sqlalchemy import or_
23
24 from warehouse.accounts.models import User
25 from warehouse.packaging.models import Project, Release, Role, JournalEntry
26 from warehouse.utils.paginate import paginate_url_factory
27
28
29 @view_config(
30 route_name="admin.project.list",
31 renderer="admin/projects/list.html",
32 permission="admin",
33 uses_session=True,
34 )
35 def project_list(request):
36 q = request.params.get("q")
37
38 try:
39 page_num = int(request.params.get("page", 1))
40 except ValueError:
41 raise HTTPBadRequest("'page' must be an integer.") from None
42
43 projects_query = request.db.query(Project).order_by(Project.name)
44
45 if q:
46 terms = shlex.split(q)
47
48 filters = []
49 for term in terms:
50 filters.append(Project.name.ilike(term))
51
52 projects_query = projects_query.filter(or_(*filters))
53
54 projects = SQLAlchemyORMPage(
55 projects_query,
56 page=page_num,
57 items_per_page=25,
58 url_maker=paginate_url_factory(request),
59 )
60
61 return {"projects": projects, "query": q}
62
63
64 @view_config(route_name="admin.project.detail",
65 renderer="admin/projects/detail.html",
66 permission="admin",
67 uses_session=True,
68 require_csrf=True,
69 require_methods=False)
70 def project_detail(project, request):
71 project_name = request.matchdict["project_name"]
72
73 if project_name != project.normalized_name:
74 raise HTTPMovedPermanently(
75 request.current_route_path(
76 project_name=project.normalized_name,
77 ),
78 )
79
80 maintainers = [
81 role
82 for role in (
83 request.db.query(Role)
84 .join(User)
85 .filter(Role.project == project)
86 .distinct(User.username)
87 .all()
88 )
89 ]
90 maintainers = sorted(
91 maintainers,
92 key=lambda x: (x.role_name, x.user.username),
93 )
94 journal = [
95 entry
96 for entry in (
97 request.db.query(JournalEntry)
98 .filter(JournalEntry.name == project.name)
99 .order_by(JournalEntry.submitted_date.desc())
100 .limit(50)
101 )
102 ]
103
104 return {"project": project, "maintainers": maintainers, "journal": journal}
105
106
107 @view_config(
108 route_name="admin.project.releases",
109 renderer="admin/projects/releases_list.html",
110 permission="admin",
111 uses_session=True,
112 )
113 def releases_list(project, request):
114 q = request.params.get("q")
115 project_name = request.matchdict["project_name"]
116
117 if project_name != project.normalized_name:
118 raise HTTPMovedPermanently(
119 request.current_route_path(
120 project_name=project.normalized_name,
121 ),
122 )
123
124 try:
125 page_num = int(request.params.get("page", 1))
126 except ValueError:
127 raise HTTPBadRequest("'page' must be an integer.") from None
128
129 releases_query = (request.db.query(Release)
130 .filter(Release.project == project)
131 .order_by(Release._pypi_ordering.desc()))
132
133 if q:
134 terms = shlex.split(q)
135
136 filters = []
137 for term in terms:
138 if ":" in term:
139 field, value = term.split(":", 1)
140 if field.lower() == "version":
141 filters.append(Release.version.ilike(value))
142
143 releases_query = releases_query.filter(or_(*filters))
144
145 releases = SQLAlchemyORMPage(
146 releases_query,
147 page=page_num,
148 items_per_page=25,
149 url_maker=paginate_url_factory(request),
150 )
151
152 return {
153 "releases": releases,
154 "project": project,
155 "query": q,
156 }
157
158
159 @view_config(
160 route_name="admin.project.journals",
161 renderer="admin/projects/journals_list.html",
162 permission="admin",
163 uses_session=True,
164 )
165 def journals_list(project, request):
166 q = request.params.get("q")
167 project_name = request.matchdict["project_name"]
168
169 if project_name != project.normalized_name:
170 raise HTTPMovedPermanently(
171 request.current_route_path(
172 project_name=project.normalized_name,
173 ),
174 )
175
176 try:
177 page_num = int(request.params.get("page", 1))
178 except ValueError:
179 raise HTTPBadRequest("'page' must be an integer.") from None
180
181 journals_query = (request.db.query(JournalEntry)
182 .filter(JournalEntry.name == project.name)
183 .order_by(JournalEntry.submitted_date.desc()))
184
185 if q:
186 terms = shlex.split(q)
187
188 filters = []
189 for term in terms:
190 if ":" in term:
191 field, value = term.split(":", 1)
192 if field.lower() == "version":
193 filters.append(JournalEntry.version.ilike(value))
194
195 journals_query = journals_query.filter(or_(*filters))
196
197 journals = SQLAlchemyORMPage(
198 journals_query,
199 page=page_num,
200 items_per_page=25,
201 url_maker=paginate_url_factory(request),
202 )
203
204 return {"journals": journals, "project": project, "query": q}
205
206
207 @view_config(
208 route_name="admin.project.set_upload_limit",
209 permission="admin",
210 request_method="POST",
211 uses_session=True,
212 require_methods=False,
213 )
214 def set_upload_limit(project, request):
215 upload_limit = request.POST.get("upload_limit", "")
216
217 # Update the project's upload limit.
218 # If the upload limit is an empty string or othrwise falsy, just set the
219 # limit to None, indicating the default limit.
220 if not upload_limit:
221 project.upload_limit = None
222 else:
223 try:
224 project.upload_limit = int(upload_limit)
225 except ValueError:
226 raise HTTPBadRequest(
227 f"Invalid value for upload_limit: {upload_limit}, "
228 f"must be integer or empty string.")
229
230 request.session.flash(
231 f"Successfully set the upload limit on {project.name!r} to "
232 f"{project.upload_limit!r}",
233 queue="success",
234 )
235
236 return HTTPSeeOther(
237 request.route_path(
238 'admin.project.detail', project_name=project.normalized_name))
239
[end of warehouse/admin/views/projects.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/warehouse/admin/views/projects.py b/warehouse/admin/views/projects.py
--- a/warehouse/admin/views/projects.py
+++ b/warehouse/admin/views/projects.py
@@ -24,6 +24,9 @@
from warehouse.accounts.models import User
from warehouse.packaging.models import Project, Release, Role, JournalEntry
from warehouse.utils.paginate import paginate_url_factory
+from warehouse.forklift.legacy import MAX_FILESIZE
+
+ONE_MB = 1024 * 1024 # bytes
@view_config(
@@ -101,7 +104,13 @@
)
]
- return {"project": project, "maintainers": maintainers, "journal": journal}
+ return {
+ "project": project,
+ "maintainers": maintainers,
+ "journal": journal,
+ "ONE_MB": ONE_MB,
+ "MAX_FILESIZE": MAX_FILESIZE
+ }
@view_config(
@@ -218,18 +227,27 @@
# If the upload limit is an empty string or othrwise falsy, just set the
# limit to None, indicating the default limit.
if not upload_limit:
- project.upload_limit = None
+ upload_limit = None
else:
try:
- project.upload_limit = int(upload_limit)
+ upload_limit = int(upload_limit)
except ValueError:
raise HTTPBadRequest(
- f"Invalid value for upload_limit: {upload_limit}, "
+ f"Invalid value for upload limit: {upload_limit}, "
f"must be integer or empty string.")
+ # The form is in MB, but the database field is in bytes.
+ upload_limit *= ONE_MB
+
+ if upload_limit < MAX_FILESIZE:
+ raise HTTPBadRequest(
+ f"Upload limit can not be less than the default limit of "
+ f"{MAX_FILESIZE / ONE_MB}MB.")
+
+ project.upload_limit = upload_limit
+
request.session.flash(
- f"Successfully set the upload limit on {project.name!r} to "
- f"{project.upload_limit!r}",
+ f"Successfully set the upload limit on {project.name!r}.",
queue="success",
)
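A small arithmetic illustration of the megabyte handling this patch introduces; the 60 MB figure for `MAX_FILESIZE` is an assumption used only for the example.

```python
ONE_MB = 1024 * 1024               # bytes
MAX_FILESIZE = 60 * ONE_MB         # assumed default upload limit

form_value = "100"                 # the admin form now takes megabytes
upload_limit = int(form_value) * ONE_MB    # 104857600 bytes stored on the project

# Limits below the default minimum are rejected with a 400 before saving
assert upload_limit >= MAX_FILESIZE
```

Exposing `ONE_MB` and `MAX_FILESIZE` in the template context also makes it straightforward to render the field as a number input with a sensible `min` attribute, as suggested in the issue.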
|
{"golden_diff": "diff --git a/warehouse/admin/views/projects.py b/warehouse/admin/views/projects.py\n--- a/warehouse/admin/views/projects.py\n+++ b/warehouse/admin/views/projects.py\n@@ -24,6 +24,9 @@\n from warehouse.accounts.models import User\n from warehouse.packaging.models import Project, Release, Role, JournalEntry\n from warehouse.utils.paginate import paginate_url_factory\n+from warehouse.forklift.legacy import MAX_FILESIZE\n+\n+ONE_MB = 1024 * 1024 # bytes\n \n \n @view_config(\n@@ -101,7 +104,13 @@\n )\n ]\n \n- return {\"project\": project, \"maintainers\": maintainers, \"journal\": journal}\n+ return {\n+ \"project\": project,\n+ \"maintainers\": maintainers,\n+ \"journal\": journal,\n+ \"ONE_MB\": ONE_MB,\n+ \"MAX_FILESIZE\": MAX_FILESIZE\n+ }\n \n \n @view_config(\n@@ -218,18 +227,27 @@\n # If the upload limit is an empty string or othrwise falsy, just set the\n # limit to None, indicating the default limit.\n if not upload_limit:\n- project.upload_limit = None\n+ upload_limit = None\n else:\n try:\n- project.upload_limit = int(upload_limit)\n+ upload_limit = int(upload_limit)\n except ValueError:\n raise HTTPBadRequest(\n- f\"Invalid value for upload_limit: {upload_limit}, \"\n+ f\"Invalid value for upload limit: {upload_limit}, \"\n f\"must be integer or empty string.\")\n \n+ # The form is in MB, but the database field is in bytes.\n+ upload_limit *= ONE_MB\n+\n+ if upload_limit < MAX_FILESIZE:\n+ raise HTTPBadRequest(\n+ f\"Upload limit can not be less than the default limit of \"\n+ f\"{MAX_FILESIZE / ONE_MB}MB.\")\n+\n+ project.upload_limit = upload_limit\n+\n request.session.flash(\n- f\"Successfully set the upload limit on {project.name!r} to \"\n- f\"{project.upload_limit!r}\",\n+ f\"Successfully set the upload limit on {project.name!r}.\",\n queue=\"success\",\n )\n", "issue": "Admin package upload size text control should use MB instead of bytes\nContext: https://github.com/pypa/warehouse/pull/2470#issuecomment-334852617\r\n\r\nDecision:\r\n\r\n> The input in should be in MB and should just be converted to/from bytes in the backend.\r\n\r\nAlso:\r\n\r\n> It also might be a good idea to flash an error and redirect back to the detail page if you try to set a limit less then the current default minimum.\r\n\r\nAdditionally:\r\n\r\n> [The form field] would be a bit nicer as <input type=\"number\" min={{ THE MINMUM SIZE }} ...> instead of as a text field. 
It might also make sense to include step=\"10\" or something\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport shlex\n\nfrom paginate_sqlalchemy import SqlalchemyOrmPage as SQLAlchemyORMPage\nfrom pyramid.httpexceptions import (\n HTTPBadRequest,\n HTTPMovedPermanently,\n HTTPSeeOther,\n)\nfrom pyramid.view import view_config\nfrom sqlalchemy import or_\n\nfrom warehouse.accounts.models import User\nfrom warehouse.packaging.models import Project, Release, Role, JournalEntry\nfrom warehouse.utils.paginate import paginate_url_factory\n\n\n@view_config(\n route_name=\"admin.project.list\",\n renderer=\"admin/projects/list.html\",\n permission=\"admin\",\n uses_session=True,\n)\ndef project_list(request):\n q = request.params.get(\"q\")\n\n try:\n page_num = int(request.params.get(\"page\", 1))\n except ValueError:\n raise HTTPBadRequest(\"'page' must be an integer.\") from None\n\n projects_query = request.db.query(Project).order_by(Project.name)\n\n if q:\n terms = shlex.split(q)\n\n filters = []\n for term in terms:\n filters.append(Project.name.ilike(term))\n\n projects_query = projects_query.filter(or_(*filters))\n\n projects = SQLAlchemyORMPage(\n projects_query,\n page=page_num,\n items_per_page=25,\n url_maker=paginate_url_factory(request),\n )\n\n return {\"projects\": projects, \"query\": q}\n\n\n@view_config(route_name=\"admin.project.detail\",\n renderer=\"admin/projects/detail.html\",\n permission=\"admin\",\n uses_session=True,\n require_csrf=True,\n require_methods=False)\ndef project_detail(project, request):\n project_name = request.matchdict[\"project_name\"]\n\n if project_name != project.normalized_name:\n raise HTTPMovedPermanently(\n request.current_route_path(\n project_name=project.normalized_name,\n ),\n )\n\n maintainers = [\n role\n for role in (\n request.db.query(Role)\n .join(User)\n .filter(Role.project == project)\n .distinct(User.username)\n .all()\n )\n ]\n maintainers = sorted(\n maintainers,\n key=lambda x: (x.role_name, x.user.username),\n )\n journal = [\n entry\n for entry in (\n request.db.query(JournalEntry)\n .filter(JournalEntry.name == project.name)\n .order_by(JournalEntry.submitted_date.desc())\n .limit(50)\n )\n ]\n\n return {\"project\": project, \"maintainers\": maintainers, \"journal\": journal}\n\n\n@view_config(\n route_name=\"admin.project.releases\",\n renderer=\"admin/projects/releases_list.html\",\n permission=\"admin\",\n uses_session=True,\n)\ndef releases_list(project, request):\n q = request.params.get(\"q\")\n project_name = request.matchdict[\"project_name\"]\n\n if project_name != project.normalized_name:\n raise HTTPMovedPermanently(\n request.current_route_path(\n project_name=project.normalized_name,\n ),\n )\n\n try:\n page_num = int(request.params.get(\"page\", 1))\n except ValueError:\n raise HTTPBadRequest(\"'page' must be an integer.\") from None\n\n releases_query = (request.db.query(Release)\n .filter(Release.project == project)\n .order_by(Release._pypi_ordering.desc()))\n\n if q:\n 
terms = shlex.split(q)\n\n filters = []\n for term in terms:\n if \":\" in term:\n field, value = term.split(\":\", 1)\n if field.lower() == \"version\":\n filters.append(Release.version.ilike(value))\n\n releases_query = releases_query.filter(or_(*filters))\n\n releases = SQLAlchemyORMPage(\n releases_query,\n page=page_num,\n items_per_page=25,\n url_maker=paginate_url_factory(request),\n )\n\n return {\n \"releases\": releases,\n \"project\": project,\n \"query\": q,\n }\n\n\n@view_config(\n route_name=\"admin.project.journals\",\n renderer=\"admin/projects/journals_list.html\",\n permission=\"admin\",\n uses_session=True,\n)\ndef journals_list(project, request):\n q = request.params.get(\"q\")\n project_name = request.matchdict[\"project_name\"]\n\n if project_name != project.normalized_name:\n raise HTTPMovedPermanently(\n request.current_route_path(\n project_name=project.normalized_name,\n ),\n )\n\n try:\n page_num = int(request.params.get(\"page\", 1))\n except ValueError:\n raise HTTPBadRequest(\"'page' must be an integer.\") from None\n\n journals_query = (request.db.query(JournalEntry)\n .filter(JournalEntry.name == project.name)\n .order_by(JournalEntry.submitted_date.desc()))\n\n if q:\n terms = shlex.split(q)\n\n filters = []\n for term in terms:\n if \":\" in term:\n field, value = term.split(\":\", 1)\n if field.lower() == \"version\":\n filters.append(JournalEntry.version.ilike(value))\n\n journals_query = journals_query.filter(or_(*filters))\n\n journals = SQLAlchemyORMPage(\n journals_query,\n page=page_num,\n items_per_page=25,\n url_maker=paginate_url_factory(request),\n )\n\n return {\"journals\": journals, \"project\": project, \"query\": q}\n\n\n@view_config(\n route_name=\"admin.project.set_upload_limit\",\n permission=\"admin\",\n request_method=\"POST\",\n uses_session=True,\n require_methods=False,\n)\ndef set_upload_limit(project, request):\n upload_limit = request.POST.get(\"upload_limit\", \"\")\n\n # Update the project's upload limit.\n # If the upload limit is an empty string or othrwise falsy, just set the\n # limit to None, indicating the default limit.\n if not upload_limit:\n project.upload_limit = None\n else:\n try:\n project.upload_limit = int(upload_limit)\n except ValueError:\n raise HTTPBadRequest(\n f\"Invalid value for upload_limit: {upload_limit}, \"\n f\"must be integer or empty string.\")\n\n request.session.flash(\n f\"Successfully set the upload limit on {project.name!r} to \"\n f\"{project.upload_limit!r}\",\n queue=\"success\",\n )\n\n return HTTPSeeOther(\n request.route_path(\n 'admin.project.detail', project_name=project.normalized_name))\n", "path": "warehouse/admin/views/projects.py"}]}
| 2,777 | 496 |
gh_patches_debug_25731
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-3207
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"year in the books" should not appear for newly registered users
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
1. Register a new account during the time "{year} in the books" appears
2. Get a message encouraging you to see all the books you read in the year, which will be none.
**Expected behavior**
This should not be displayed for users who have no readthroughs.
**Screenshots**

**Instance**
dev site
</issue>
<code>
[start of bookwyrm/views/feed.py]
1 """ non-interactive pages """
2 from django.contrib.auth.decorators import login_required
3 from django.core.paginator import Paginator
4 from django.db.models import Q
5 from django.http import HttpResponseNotFound, Http404
6 from django.shortcuts import get_object_or_404
7 from django.template.response import TemplateResponse
8 from django.utils import timezone
9 from django.utils.decorators import method_decorator
10 from django.views import View
11
12 from bookwyrm import activitystreams, forms, models
13 from bookwyrm.models.user import FeedFilterChoices
14 from bookwyrm.activitypub import ActivitypubResponse
15 from bookwyrm.settings import PAGE_LENGTH, STREAMS
16 from bookwyrm.suggested_users import suggested_users
17 from .helpers import filter_stream_by_status_type, get_user_from_username
18 from .helpers import is_api_request, is_bookwyrm_request, maybe_redirect_local_path
19 from .annual_summary import get_annual_summary_year
20
21
22 # pylint: disable= no-self-use
23 @method_decorator(login_required, name="dispatch")
24 class Feed(View):
25 """activity stream"""
26
27 def post(self, request, tab):
28 """save feed settings form, with a silent validation fail"""
29 filters_applied = False
30 form = forms.FeedStatusTypesForm(request.POST, instance=request.user)
31 if form.is_valid():
32 # workaround to avoid broadcasting this change
33 user = form.save(request, commit=False)
34 user.save(broadcast=False, update_fields=["feed_status_types"])
35 filters_applied = True
36
37 return self.get(request, tab, filters_applied)
38
39 def get(self, request, tab, filters_applied=False):
40 """user's homepage with activity feed"""
41 tab = [s for s in STREAMS if s["key"] == tab]
42 tab = tab[0] if tab else STREAMS[0]
43
44 activities = activitystreams.streams[tab["key"]].get_activity_stream(
45 request.user
46 )
47 filtered_activities = filter_stream_by_status_type(
48 activities,
49 allowed_types=request.user.feed_status_types,
50 )
51 paginated = Paginator(filtered_activities, PAGE_LENGTH)
52
53 suggestions = suggested_users.get_suggestions(request.user)
54
55 data = {
56 **feed_page_data(request.user),
57 **{
58 "user": request.user,
59 "activities": paginated.get_page(request.GET.get("page")),
60 "suggested_users": suggestions,
61 "tab": tab,
62 "streams": STREAMS,
63 "goal_form": forms.GoalForm(),
64 "feed_status_types_options": FeedFilterChoices,
65 "filters_applied": filters_applied,
66 "path": f"/{tab['key']}",
67 "annual_summary_year": get_annual_summary_year(),
68 "has_tour": True,
69 },
70 }
71 return TemplateResponse(request, "feed/feed.html", data)
72
73
74 @method_decorator(login_required, name="dispatch")
75 class DirectMessage(View):
76 """dm view"""
77
78 def get(self, request, username=None):
79 """like a feed but for dms only"""
80 # remove fancy subclasses of status, keep just good ol' notes
81 activities = (
82 models.Status.privacy_filter(request.user, privacy_levels=["direct"])
83 .filter(
84 review__isnull=True,
85 comment__isnull=True,
86 quotation__isnull=True,
87 generatednote__isnull=True,
88 )
89 .order_by("-published_date")
90 )
91
92 user = None
93 if username:
94 try:
95 user = get_user_from_username(request.user, username)
96 except Http404:
97 pass
98 if user:
99 activities = activities.filter(Q(user=user) | Q(mention_users=user))
100
101 paginated = Paginator(activities, PAGE_LENGTH)
102 data = {
103 **feed_page_data(request.user),
104 **{
105 "user": request.user,
106 "partner": user,
107 "activities": paginated.get_page(request.GET.get("page")),
108 "path": "/direct-messages",
109 },
110 }
111 return TemplateResponse(request, "feed/direct_messages.html", data)
112
113
114 class Status(View):
115 """get posting"""
116
117 # pylint: disable=unused-argument
118 def get(self, request, username, status_id, slug=None):
119 """display a particular status (and replies, etc)"""
120 user = get_user_from_username(request.user, username)
121 status = get_object_or_404(
122 models.Status.objects.select_subclasses(),
123 user=user,
124 id=status_id,
125 deleted=False,
126 )
127 # make sure the user is authorized to see the status
128 status.raise_visible_to_user(request.user)
129
130 if is_api_request(request):
131 return ActivitypubResponse(
132 status.to_activity(pure=not is_bookwyrm_request(request))
133 )
134
135 if redirect_local_path := maybe_redirect_local_path(request, status):
136 return redirect_local_path
137
138 visible_thread = (
139 models.Status.privacy_filter(request.user)
140 .filter(thread_id=status.thread_id)
141 .values_list("id", flat=True)
142 )
143 visible_thread = list(visible_thread)
144
145 ancestors = models.Status.objects.select_subclasses().raw(
146 """
147 WITH RECURSIVE get_thread(depth, id, path) AS (
148
149 SELECT 1, st.id, ARRAY[st.id]
150 FROM bookwyrm_status st
151 WHERE id = '%s' AND id = ANY(%s)
152
153 UNION
154
155 SELECT (gt.depth + 1), st.reply_parent_id, path || st.id
156 FROM get_thread gt, bookwyrm_status st
157
158 WHERE st.id = gt.id AND depth < 5 AND st.id = ANY(%s)
159
160 )
161
162 SELECT * FROM get_thread ORDER BY path DESC;
163 """,
164 params=[status.reply_parent_id or 0, visible_thread, visible_thread],
165 )
166 children = models.Status.objects.select_subclasses().raw(
167 """
168 WITH RECURSIVE get_thread(depth, id, path) AS (
169
170 SELECT 1, st.id, ARRAY[st.id]
171 FROM bookwyrm_status st
172 WHERE reply_parent_id = '%s' AND id = ANY(%s)
173
174 UNION
175
176 SELECT (gt.depth + 1), st.id, path || st.id
177 FROM get_thread gt, bookwyrm_status st
178
179 WHERE st.reply_parent_id = gt.id AND depth < 5 AND st.id = ANY(%s)
180
181 )
182
183 SELECT * FROM get_thread ORDER BY path;
184 """,
185 params=[status.id, visible_thread, visible_thread],
186 )
187
188 preview = None
189 if hasattr(status, "book"):
190 preview = status.book.preview_image
191 elif status.mention_books.exists():
192 preview = status.mention_books.first().preview_image
193
194 data = {
195 **feed_page_data(request.user),
196 **{
197 "status": status,
198 "children": children,
199 "ancestors": ancestors,
200 "preview": preview,
201 },
202 }
203 return TemplateResponse(request, "feed/status.html", data)
204
205
206 class Replies(View):
207 """replies page (a json view of status)"""
208
209 def get(self, request, username, status_id):
210 """ordered collection of replies to a status"""
211 # the html view is the same as Status
212 if not is_api_request(request):
213 status_view = Status.as_view()
214 return status_view(request, username, status_id)
215
216 # the json view is different than Status
217 status = models.Status.objects.get(id=status_id)
218 if status.user.localname != username:
219 return HttpResponseNotFound()
220 status.raise_visible_to_user(request.user)
221
222 return ActivitypubResponse(status.to_replies(**request.GET))
223
224
225 def feed_page_data(user):
226 """info we need for every feed page"""
227 if not user.is_authenticated:
228 return {}
229
230 goal = models.AnnualGoal.objects.filter(user=user, year=timezone.now().year).first()
231 return {
232 "goal": goal,
233 "goal_form": forms.GoalForm(),
234 }
235
236
237 def get_suggested_books(user, max_books=5):
238 """helper to get a user's recent books"""
239 book_count = 0
240 preset_shelves = {"reading": max_books, "read": 2, "to-read": max_books}
241 suggested_books = []
242
243 user_shelves = {
244 shelf.identifier: shelf
245 for shelf in user.shelf_set.filter(
246 identifier__in=preset_shelves.keys()
247 ).exclude(books__isnull=True)
248 }
249
250 for preset, shelf_max in preset_shelves.items():
251 limit = (
252 shelf_max
253 if shelf_max < (max_books - book_count)
254 else max_books - book_count
255 )
256 shelf = user_shelves.get(preset, None)
257 if not shelf:
258 continue
259
260 shelf_preview = {
261 "name": shelf.name,
262 "identifier": shelf.identifier,
263 "books": models.Edition.viewer_aware_objects(user)
264 .filter(
265 shelfbook__shelf=shelf,
266 )
267 .order_by("-shelfbook__shelved_date")
268 .prefetch_related("authors")[:limit],
269 }
270 suggested_books.append(shelf_preview)
271 book_count += len(shelf_preview["books"])
272 return suggested_books
273
[end of bookwyrm/views/feed.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bookwyrm/views/feed.py b/bookwyrm/views/feed.py
--- a/bookwyrm/views/feed.py
+++ b/bookwyrm/views/feed.py
@@ -1,4 +1,5 @@
""" non-interactive pages """
+from datetime import date
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.db.models import Q
@@ -52,6 +53,19 @@
suggestions = suggested_users.get_suggestions(request.user)
+ cutoff = (
+ date(get_annual_summary_year(), 12, 31)
+ if get_annual_summary_year()
+ else None
+ )
+ readthroughs = (
+ models.ReadThrough.objects.filter(
+ user=request.user, finish_date__lte=cutoff
+ )
+ if get_annual_summary_year()
+ else []
+ )
+
data = {
**feed_page_data(request.user),
**{
@@ -66,6 +80,7 @@
"path": f"/{tab['key']}",
"annual_summary_year": get_annual_summary_year(),
"has_tour": True,
+ "has_summary_read_throughs": len(readthroughs),
},
}
return TemplateResponse(request, "feed/feed.html", data)
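
The patch above counts the user's `ReadThrough` rows finished on or before 31 December of the summary year and exposes that count to the template. Below is a minimal, framework-free sketch of the same gating decision for readers who want to trace it outside Django; the function name, signature, and sample dates are illustrative and are not part of BookWyrm's API.

```python
from datetime import date
from typing import Iterable, Optional

def should_show_annual_summary(summary_year: Optional[int],
                               finish_dates: Iterable[date]) -> bool:
    """Show the banner only when a summary year is active and the user
    finished at least one book by 31 December of that year."""
    if summary_year is None:          # no "year in the books" period running
        return False
    cutoff = date(summary_year, 12, 31)
    return any(finished <= cutoff for finished in finish_dates)

# Quick sanity checks (illustrative):
assert should_show_annual_summary(None, [date(2023, 5, 1)]) is False   # feature inactive
assert should_show_annual_summary(2023, []) is False                   # newly registered user
assert should_show_annual_summary(2023, [date(2023, 11, 30)]) is True  # has a finished read
```

In the real view the count is passed to the template as `has_summary_read_throughs`, so the template can hide the banner when it is zero; the template-side check itself is not part of this diff.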
|
{"golden_diff": "diff --git a/bookwyrm/views/feed.py b/bookwyrm/views/feed.py\n--- a/bookwyrm/views/feed.py\n+++ b/bookwyrm/views/feed.py\n@@ -1,4 +1,5 @@\n \"\"\" non-interactive pages \"\"\"\n+from datetime import date\n from django.contrib.auth.decorators import login_required\n from django.core.paginator import Paginator\n from django.db.models import Q\n@@ -52,6 +53,19 @@\n \n suggestions = suggested_users.get_suggestions(request.user)\n \n+ cutoff = (\n+ date(get_annual_summary_year(), 12, 31)\n+ if get_annual_summary_year()\n+ else None\n+ )\n+ readthroughs = (\n+ models.ReadThrough.objects.filter(\n+ user=request.user, finish_date__lte=cutoff\n+ )\n+ if get_annual_summary_year()\n+ else []\n+ )\n+\n data = {\n **feed_page_data(request.user),\n **{\n@@ -66,6 +80,7 @@\n \"path\": f\"/{tab['key']}\",\n \"annual_summary_year\": get_annual_summary_year(),\n \"has_tour\": True,\n+ \"has_summary_read_throughs\": len(readthroughs),\n },\n }\n return TemplateResponse(request, \"feed/feed.html\", data)\n", "issue": "\"year in the books\" should not appear for newly registered users\n**Describe the bug**\r\nA clear and concise description of what the bug is.\r\n\r\n**To Reproduce**\r\n1. Register a new account during the time \"{year} in the books\" appears\r\n2. Get a messsage encouraging you to see all the books you read in the year, which will be none.\r\n\r\n**Expected behavior**\r\nThis should not be displayed for users who have no readthroughs.\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n**Instance**\r\ndev site\r\n\r\n\n", "before_files": [{"content": "\"\"\" non-interactive pages \"\"\"\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db.models import Q\nfrom django.http import HttpResponseNotFound, Http404\nfrom django.shortcuts import get_object_or_404\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import activitystreams, forms, models\nfrom bookwyrm.models.user import FeedFilterChoices\nfrom bookwyrm.activitypub import ActivitypubResponse\nfrom bookwyrm.settings import PAGE_LENGTH, STREAMS\nfrom bookwyrm.suggested_users import suggested_users\nfrom .helpers import filter_stream_by_status_type, get_user_from_username\nfrom .helpers import is_api_request, is_bookwyrm_request, maybe_redirect_local_path\nfrom .annual_summary import get_annual_summary_year\n\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass Feed(View):\n \"\"\"activity stream\"\"\"\n\n def post(self, request, tab):\n \"\"\"save feed settings form, with a silent validation fail\"\"\"\n filters_applied = False\n form = forms.FeedStatusTypesForm(request.POST, instance=request.user)\n if form.is_valid():\n # workaround to avoid broadcasting this change\n user = form.save(request, commit=False)\n user.save(broadcast=False, update_fields=[\"feed_status_types\"])\n filters_applied = True\n\n return self.get(request, tab, filters_applied)\n\n def get(self, request, tab, filters_applied=False):\n \"\"\"user's homepage with activity feed\"\"\"\n tab = [s for s in STREAMS if s[\"key\"] == tab]\n tab = tab[0] if tab else STREAMS[0]\n\n activities = activitystreams.streams[tab[\"key\"]].get_activity_stream(\n request.user\n )\n filtered_activities = filter_stream_by_status_type(\n activities,\n allowed_types=request.user.feed_status_types,\n )\n paginated = 
Paginator(filtered_activities, PAGE_LENGTH)\n\n suggestions = suggested_users.get_suggestions(request.user)\n\n data = {\n **feed_page_data(request.user),\n **{\n \"user\": request.user,\n \"activities\": paginated.get_page(request.GET.get(\"page\")),\n \"suggested_users\": suggestions,\n \"tab\": tab,\n \"streams\": STREAMS,\n \"goal_form\": forms.GoalForm(),\n \"feed_status_types_options\": FeedFilterChoices,\n \"filters_applied\": filters_applied,\n \"path\": f\"/{tab['key']}\",\n \"annual_summary_year\": get_annual_summary_year(),\n \"has_tour\": True,\n },\n }\n return TemplateResponse(request, \"feed/feed.html\", data)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass DirectMessage(View):\n \"\"\"dm view\"\"\"\n\n def get(self, request, username=None):\n \"\"\"like a feed but for dms only\"\"\"\n # remove fancy subclasses of status, keep just good ol' notes\n activities = (\n models.Status.privacy_filter(request.user, privacy_levels=[\"direct\"])\n .filter(\n review__isnull=True,\n comment__isnull=True,\n quotation__isnull=True,\n generatednote__isnull=True,\n )\n .order_by(\"-published_date\")\n )\n\n user = None\n if username:\n try:\n user = get_user_from_username(request.user, username)\n except Http404:\n pass\n if user:\n activities = activities.filter(Q(user=user) | Q(mention_users=user))\n\n paginated = Paginator(activities, PAGE_LENGTH)\n data = {\n **feed_page_data(request.user),\n **{\n \"user\": request.user,\n \"partner\": user,\n \"activities\": paginated.get_page(request.GET.get(\"page\")),\n \"path\": \"/direct-messages\",\n },\n }\n return TemplateResponse(request, \"feed/direct_messages.html\", data)\n\n\nclass Status(View):\n \"\"\"get posting\"\"\"\n\n # pylint: disable=unused-argument\n def get(self, request, username, status_id, slug=None):\n \"\"\"display a particular status (and replies, etc)\"\"\"\n user = get_user_from_username(request.user, username)\n status = get_object_or_404(\n models.Status.objects.select_subclasses(),\n user=user,\n id=status_id,\n deleted=False,\n )\n # make sure the user is authorized to see the status\n status.raise_visible_to_user(request.user)\n\n if is_api_request(request):\n return ActivitypubResponse(\n status.to_activity(pure=not is_bookwyrm_request(request))\n )\n\n if redirect_local_path := maybe_redirect_local_path(request, status):\n return redirect_local_path\n\n visible_thread = (\n models.Status.privacy_filter(request.user)\n .filter(thread_id=status.thread_id)\n .values_list(\"id\", flat=True)\n )\n visible_thread = list(visible_thread)\n\n ancestors = models.Status.objects.select_subclasses().raw(\n \"\"\"\n WITH RECURSIVE get_thread(depth, id, path) AS (\n\n SELECT 1, st.id, ARRAY[st.id]\n FROM bookwyrm_status st\n WHERE id = '%s' AND id = ANY(%s)\n\n UNION\n\n SELECT (gt.depth + 1), st.reply_parent_id, path || st.id\n FROM get_thread gt, bookwyrm_status st\n\n WHERE st.id = gt.id AND depth < 5 AND st.id = ANY(%s)\n\n )\n\n SELECT * FROM get_thread ORDER BY path DESC;\n \"\"\",\n params=[status.reply_parent_id or 0, visible_thread, visible_thread],\n )\n children = models.Status.objects.select_subclasses().raw(\n \"\"\"\n WITH RECURSIVE get_thread(depth, id, path) AS (\n\n SELECT 1, st.id, ARRAY[st.id]\n FROM bookwyrm_status st\n WHERE reply_parent_id = '%s' AND id = ANY(%s)\n\n UNION\n\n SELECT (gt.depth + 1), st.id, path || st.id\n FROM get_thread gt, bookwyrm_status st\n\n WHERE st.reply_parent_id = gt.id AND depth < 5 AND st.id = ANY(%s)\n\n )\n\n SELECT * FROM get_thread ORDER BY path;\n 
\"\"\",\n params=[status.id, visible_thread, visible_thread],\n )\n\n preview = None\n if hasattr(status, \"book\"):\n preview = status.book.preview_image\n elif status.mention_books.exists():\n preview = status.mention_books.first().preview_image\n\n data = {\n **feed_page_data(request.user),\n **{\n \"status\": status,\n \"children\": children,\n \"ancestors\": ancestors,\n \"preview\": preview,\n },\n }\n return TemplateResponse(request, \"feed/status.html\", data)\n\n\nclass Replies(View):\n \"\"\"replies page (a json view of status)\"\"\"\n\n def get(self, request, username, status_id):\n \"\"\"ordered collection of replies to a status\"\"\"\n # the html view is the same as Status\n if not is_api_request(request):\n status_view = Status.as_view()\n return status_view(request, username, status_id)\n\n # the json view is different than Status\n status = models.Status.objects.get(id=status_id)\n if status.user.localname != username:\n return HttpResponseNotFound()\n status.raise_visible_to_user(request.user)\n\n return ActivitypubResponse(status.to_replies(**request.GET))\n\n\ndef feed_page_data(user):\n \"\"\"info we need for every feed page\"\"\"\n if not user.is_authenticated:\n return {}\n\n goal = models.AnnualGoal.objects.filter(user=user, year=timezone.now().year).first()\n return {\n \"goal\": goal,\n \"goal_form\": forms.GoalForm(),\n }\n\n\ndef get_suggested_books(user, max_books=5):\n \"\"\"helper to get a user's recent books\"\"\"\n book_count = 0\n preset_shelves = {\"reading\": max_books, \"read\": 2, \"to-read\": max_books}\n suggested_books = []\n\n user_shelves = {\n shelf.identifier: shelf\n for shelf in user.shelf_set.filter(\n identifier__in=preset_shelves.keys()\n ).exclude(books__isnull=True)\n }\n\n for preset, shelf_max in preset_shelves.items():\n limit = (\n shelf_max\n if shelf_max < (max_books - book_count)\n else max_books - book_count\n )\n shelf = user_shelves.get(preset, None)\n if not shelf:\n continue\n\n shelf_preview = {\n \"name\": shelf.name,\n \"identifier\": shelf.identifier,\n \"books\": models.Edition.viewer_aware_objects(user)\n .filter(\n shelfbook__shelf=shelf,\n )\n .order_by(\"-shelfbook__shelved_date\")\n .prefetch_related(\"authors\")[:limit],\n }\n suggested_books.append(shelf_preview)\n book_count += len(shelf_preview[\"books\"])\n return suggested_books\n", "path": "bookwyrm/views/feed.py"}]}
| 3,388 | 284 |
gh_patches_debug_7675
|
rasdani/github-patches
|
git_diff
|
mozilla__bugbug-131
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add number and length of comments as a feature
</issue>
<code>
[start of bugbug/bug_features.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import re
7 from datetime import datetime
8 from datetime import timezone
9
10 import pandas as pd
11 from libmozdata import versions
12 from sklearn.base import BaseEstimator
13 from sklearn.base import TransformerMixin
14
15 from bugbug import bug_snapshot
16 from bugbug import repository
17
18
19 def field(bug, field):
20 if field in bug and bug[field] != '---':
21 return bug[field]
22
23 return None
24
25
26 class has_str(object):
27 def __call__(self, bug):
28 return field(bug, 'cf_has_str')
29
30
31 class has_regression_range(object):
32 def __call__(self, bug):
33 return field(bug, 'cf_has_regression_range')
34
35
36 class has_crash_signature(object):
37 def __call__(self, bug):
38 return 'cf_crash_signature' in bug and bug['cf_crash_signature'] != ''
39
40
41 class keywords(object):
42 def __init__(self, to_ignore=set()):
43 self.to_ignore = to_ignore
44
45 def __call__(self, bug):
46 keywords = []
47 subkeywords = []
48 for keyword in bug['keywords']:
49 if keyword in self.to_ignore:
50 continue
51
52 keywords.append(keyword)
53
54 if keyword.startswith('sec-'):
55 subkeywords.append('sec-')
56 elif keyword.startswith('csectype-'):
57 subkeywords.append('csectype-')
58 return keywords + subkeywords
59
60
61 class severity(object):
62 def __call__(self, bug):
63 return field(bug, 'severity')
64
65
66 class is_coverity_issue(object):
67 def __call__(self, bug):
68 return re.search('[CID ?[0-9]+]', bug['summary']) is not None or re.search('[CID ?[0-9]+]', bug['whiteboard']) is not None
69
70
71 class has_url(object):
72 def __call__(self, bug):
73 return bug['url'] != ''
74
75
76 class has_w3c_url(object):
77 def __call__(self, bug):
78 return 'w3c' in bug['url']
79
80
81 class has_github_url(object):
82 def __call__(self, bug):
83 return 'github' in bug['url']
84
85
86 class whiteboard(object):
87 def __call__(self, bug):
88
89 # Split by '['
90 paren_splits = bug['whiteboard'].lower().split('[')
91
92 # Split splits by space if they weren't in [ and ].
93 splits = []
94 for paren_split in paren_splits:
95 if ']' in paren_split:
96 paren_split = paren_split.split(']')
97 splits += paren_split
98 else:
99 splits += paren_split.split(' ')
100
101 # Remove empty splits and strip
102 splits = [split.strip() for split in splits if split.strip() != '']
103
104 # For splits which contain ':', return both the whole string and the string before ':'.
105 splits += [split.split(':', 1)[0] for split in splits if ':' in split]
106
107 return splits
108
109
110 class patches(object):
111 def __call__(self, bug):
112 return sum(1 for a in bug['attachments'] if a['is_patch'] or a['content_type'] in ['text/x-review-board-request', 'text/x-phabricator-request'])
113
114
115 class landings(object):
116 def __call__(self, bug):
117 return sum(1 for c in bug['comments'] if '://hg.mozilla.org/' in c['text'])
118
119
120 class title(object):
121 def __call__(self, bug):
122 ret = []
123
124 keywords = [
125 'fail',
126 ]
127 for keyword in keywords:
128 if keyword in bug['summary'].lower():
129 ret.append(keyword)
130
131 return ret
132
133
134 class product(object):
135 def __call__(self, bug):
136 return bug['product']
137
138
139 class component(object):
140 def __call__(self, bug):
141 return bug['component']
142
143
144 class is_mozillian(object):
145 def __call__(self, bug):
146 return any(bug['creator_detail']['email'].endswith(domain) for domain in ['@mozilla.com', '@mozilla.org'])
147
148
149 class delta_request_merge(object):
150 def __call__(self, bug):
151 for history in bug['history']:
152 for change in history['changes']:
153 if change['added'].startswith('approval-mozilla'):
154 uplift_request_datetime = datetime.strptime(history['when'], '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=timezone.utc)
155 timedelta = versions.getCloserRelease(uplift_request_datetime)[1] - uplift_request_datetime
156 return timedelta.days + timedelta.seconds / (24 * 60 * 60)
157
158 return None
159
160
161 class commit_added(object):
162 def __call__(self, bug):
163 return sum(commit['added'] for commit in bug['commits'])
164
165
166 class commit_deleted(object):
167 def __call__(self, bug):
168 return sum(commit['deleted'] for commit in bug['commits'])
169
170
171 class commit_types(object):
172 def __call__(self, bug):
173 return sum((commit['types'] for commit in bug['commits']), [])
174
175
176 class blocked_bugs_number(object):
177 def __call__(self, bug):
178 return len(bug['blocks'])
179
180
181 class priority(object):
182 def __call__(self, bug):
183 return bug['priority']
184
185
186 def cleanup_url(text):
187 text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\S+', '__CODE_REFERENCE_URL__', text)
188 return re.sub(r'http\S+', '__URL__', text)
189
190
191 def cleanup_fileref(text):
192 return re.sub(r'\w+\.py\b|\w+\.json\b|\w+\.js\b|\w+\.jsm\b|\w+\.html\b|\w+\.css\b|\w+\.c\b|\w+\.cpp\b|\w+\.h\b', '__FILE_REFERENCE__', text)
193
194
195 def cleanup_responses(text):
196 return re.sub('>[^\n]+', ' ', text)
197
198
199 def cleanup_hex(text):
200 return re.sub(r'\b0[xX][0-9a-fA-F]+\b', '__HEX_NUMBER__', text)
201
202
203 def cleanup_dll(text):
204 return re.sub(r'\w+(\.dll|\.so|\.dylib)\b', '__DLL_NAME__', text)
205
206
207 def cleanup_synonyms(text):
208 synonyms = [
209 ('safemode', ['safemode', 'safe mode']),
210 ('str', ['str', 'steps to reproduce', 'repro steps']),
211 ('uaf', ['uaf', 'use after free', 'use-after-free']),
212 ('asan', ['asan', 'address sanitizer', 'addresssanitizer']),
213 ('permafailure', ['permafailure', 'permafailing', 'permafail', 'perma failure', 'perma failing', 'perma fail', 'perma-failure', 'perma-failing', 'perma-fail']),
214 ('spec', ['spec', 'specification']),
215 ]
216
217 for synonym_group, synonym_list in synonyms:
218 text = re.sub('|'.join(fr'\b{synonym}\b' for synonym in synonym_list), synonym_group, text, flags=re.IGNORECASE)
219
220 return text
221
222
223 def cleanup_crash(text):
224 return re.sub(r'bp-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{6}[0-9]{6}\b', '__CRASH_STATS_LINK__', text)
225
226
227 class BugExtractor(BaseEstimator, TransformerMixin):
228 def __init__(self, feature_extractors, cleanup_functions, rollback=False, rollback_when=None, commit_data=False):
229 self.feature_extractors = feature_extractors
230 self.cleanup_functions = cleanup_functions
231 self.rollback = rollback
232 self.rollback_when = rollback_when
233 self.commit_map = repository.get_commit_map() if commit_data else None
234
235 def fit(self, x, y=None):
236 return self
237
238 def transform(self, bugs):
239 results = []
240
241 for bug in bugs:
242 bug_id = bug['id']
243
244 if self.rollback:
245 bug = bug_snapshot.rollback(bug, self.rollback_when)
246
247 data = {}
248
249 if self.commit_map is not None:
250 if bug_id in self.commit_map:
251 bug['commits'] = self.commit_map[bug_id]
252 else:
253 bug['commits'] = []
254
255 for f in self.feature_extractors:
256 res = f(bug)
257
258 if res is None:
259 continue
260
261 if isinstance(res, list):
262 for item in res:
263 data[f.__class__.__name__ + '-' + item] = 'True'
264 continue
265
266 if isinstance(res, bool):
267 res = str(res)
268
269 data[f.__class__.__name__] = res
270
271 # TODO: Try simply using all possible fields instead of extracting features manually.
272
273 for cleanup_function in self.cleanup_functions:
274 bug['summary'] = cleanup_function(bug['summary'])
275 for c in bug['comments']:
276 c['text'] = cleanup_function(c['text'])
277
278 result = {
279 'data': data,
280 'title': bug['summary'],
281 'first_comment': bug['comments'][0]['text'],
282 'comments': ' '.join([c['text'] for c in bug['comments']]),
283 }
284
285 results.append(result)
286
287 return pd.DataFrame(results)
288
[end of bugbug/bug_features.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bugbug/bug_features.py b/bugbug/bug_features.py
--- a/bugbug/bug_features.py
+++ b/bugbug/bug_features.py
@@ -183,6 +183,16 @@
return bug['priority']
+class comment_count(object):
+ def __call__(self, bug):
+ return field(bug, 'comment_count')
+
+
+class comment_length(object):
+ def __call__(self, bug):
+ return sum(len(x['text']) for x in bug['comments'])
+
+
def cleanup_url(text):
text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\S+', '__CODE_REFERENCE_URL__', text)
return re.sub(r'http\S+', '__URL__', text)
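
The two extractors added here follow the module's existing callable-class pattern. The snippet below is a self-contained sketch (it does not import bugbug) that mirrors them against a toy bug dictionary to make the expected inputs and outputs concrete; it assumes, as the patch does, that the bug payload exposes a `comment_count` field and a `comments` list, and the `field` helper is reproduced from the same module.

```python
def field(bug, name):
    # Same convention as bugbug.bug_features.field: '---' means "no value".
    if name in bug and bug[name] != '---':
        return bug[name]
    return None

class comment_count:
    """Number of comments, as reported by the bug's `comment_count` field."""
    def __call__(self, bug):
        return field(bug, 'comment_count')

class comment_length:
    """Total characters across all comment bodies."""
    def __call__(self, bug):
        return sum(len(c['text']) for c in bug['comments'])

toy_bug = {
    'comment_count': 3,
    'comments': [
        {'text': 'STR attached'},
        {'text': 'confirmed'},
        {'text': 'fixed by bug 123'},
    ],
}
print(comment_count()(toy_bug))   # -> 3
print(comment_length()(toy_bug))  # -> 37 (characters across all comments)
```

In the real pipeline these classes are simply appended to the `feature_extractors` list handed to `BugExtractor`, which stores each scalar result keyed by the extractor's class name.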
|
{"golden_diff": "diff --git a/bugbug/bug_features.py b/bugbug/bug_features.py\n--- a/bugbug/bug_features.py\n+++ b/bugbug/bug_features.py\n@@ -183,6 +183,16 @@\n return bug['priority']\n \n \n+class comment_count(object):\n+ def __call__(self, bug):\n+ return field(bug, 'comment_count')\n+\n+\n+class comment_length(object):\n+ def __call__(self, bug):\n+ return sum(len(x['text']) for x in bug['comments'])\n+\n+\n def cleanup_url(text):\n text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\\S+', '__CODE_REFERENCE_URL__', text)\n return re.sub(r'http\\S+', '__URL__', text)\n", "issue": "Add number and length of comments as a feature\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport re\nfrom datetime import datetime\nfrom datetime import timezone\n\nimport pandas as pd\nfrom libmozdata import versions\nfrom sklearn.base import BaseEstimator\nfrom sklearn.base import TransformerMixin\n\nfrom bugbug import bug_snapshot\nfrom bugbug import repository\n\n\ndef field(bug, field):\n if field in bug and bug[field] != '---':\n return bug[field]\n\n return None\n\n\nclass has_str(object):\n def __call__(self, bug):\n return field(bug, 'cf_has_str')\n\n\nclass has_regression_range(object):\n def __call__(self, bug):\n return field(bug, 'cf_has_regression_range')\n\n\nclass has_crash_signature(object):\n def __call__(self, bug):\n return 'cf_crash_signature' in bug and bug['cf_crash_signature'] != ''\n\n\nclass keywords(object):\n def __init__(self, to_ignore=set()):\n self.to_ignore = to_ignore\n\n def __call__(self, bug):\n keywords = []\n subkeywords = []\n for keyword in bug['keywords']:\n if keyword in self.to_ignore:\n continue\n\n keywords.append(keyword)\n\n if keyword.startswith('sec-'):\n subkeywords.append('sec-')\n elif keyword.startswith('csectype-'):\n subkeywords.append('csectype-')\n return keywords + subkeywords\n\n\nclass severity(object):\n def __call__(self, bug):\n return field(bug, 'severity')\n\n\nclass is_coverity_issue(object):\n def __call__(self, bug):\n return re.search('[CID ?[0-9]+]', bug['summary']) is not None or re.search('[CID ?[0-9]+]', bug['whiteboard']) is not None\n\n\nclass has_url(object):\n def __call__(self, bug):\n return bug['url'] != ''\n\n\nclass has_w3c_url(object):\n def __call__(self, bug):\n return 'w3c' in bug['url']\n\n\nclass has_github_url(object):\n def __call__(self, bug):\n return 'github' in bug['url']\n\n\nclass whiteboard(object):\n def __call__(self, bug):\n\n # Split by '['\n paren_splits = bug['whiteboard'].lower().split('[')\n\n # Split splits by space if they weren't in [ and ].\n splits = []\n for paren_split in paren_splits:\n if ']' in paren_split:\n paren_split = paren_split.split(']')\n splits += paren_split\n else:\n splits += paren_split.split(' ')\n\n # Remove empty splits and strip\n splits = [split.strip() for split in splits if split.strip() != '']\n\n # For splits which contain ':', return both the whole string and the string before ':'.\n splits += [split.split(':', 1)[0] for split in splits if ':' in split]\n\n return splits\n\n\nclass patches(object):\n def __call__(self, bug):\n return sum(1 for a in bug['attachments'] if a['is_patch'] or a['content_type'] in ['text/x-review-board-request', 'text/x-phabricator-request'])\n\n\nclass landings(object):\n def __call__(self, bug):\n return sum(1 for c 
in bug['comments'] if '://hg.mozilla.org/' in c['text'])\n\n\nclass title(object):\n def __call__(self, bug):\n ret = []\n\n keywords = [\n 'fail',\n ]\n for keyword in keywords:\n if keyword in bug['summary'].lower():\n ret.append(keyword)\n\n return ret\n\n\nclass product(object):\n def __call__(self, bug):\n return bug['product']\n\n\nclass component(object):\n def __call__(self, bug):\n return bug['component']\n\n\nclass is_mozillian(object):\n def __call__(self, bug):\n return any(bug['creator_detail']['email'].endswith(domain) for domain in ['@mozilla.com', '@mozilla.org'])\n\n\nclass delta_request_merge(object):\n def __call__(self, bug):\n for history in bug['history']:\n for change in history['changes']:\n if change['added'].startswith('approval-mozilla'):\n uplift_request_datetime = datetime.strptime(history['when'], '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=timezone.utc)\n timedelta = versions.getCloserRelease(uplift_request_datetime)[1] - uplift_request_datetime\n return timedelta.days + timedelta.seconds / (24 * 60 * 60)\n\n return None\n\n\nclass commit_added(object):\n def __call__(self, bug):\n return sum(commit['added'] for commit in bug['commits'])\n\n\nclass commit_deleted(object):\n def __call__(self, bug):\n return sum(commit['deleted'] for commit in bug['commits'])\n\n\nclass commit_types(object):\n def __call__(self, bug):\n return sum((commit['types'] for commit in bug['commits']), [])\n\n\nclass blocked_bugs_number(object):\n def __call__(self, bug):\n return len(bug['blocks'])\n\n\nclass priority(object):\n def __call__(self, bug):\n return bug['priority']\n\n\ndef cleanup_url(text):\n text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\\S+', '__CODE_REFERENCE_URL__', text)\n return re.sub(r'http\\S+', '__URL__', text)\n\n\ndef cleanup_fileref(text):\n return re.sub(r'\\w+\\.py\\b|\\w+\\.json\\b|\\w+\\.js\\b|\\w+\\.jsm\\b|\\w+\\.html\\b|\\w+\\.css\\b|\\w+\\.c\\b|\\w+\\.cpp\\b|\\w+\\.h\\b', '__FILE_REFERENCE__', text)\n\n\ndef cleanup_responses(text):\n return re.sub('>[^\\n]+', ' ', text)\n\n\ndef cleanup_hex(text):\n return re.sub(r'\\b0[xX][0-9a-fA-F]+\\b', '__HEX_NUMBER__', text)\n\n\ndef cleanup_dll(text):\n return re.sub(r'\\w+(\\.dll|\\.so|\\.dylib)\\b', '__DLL_NAME__', text)\n\n\ndef cleanup_synonyms(text):\n synonyms = [\n ('safemode', ['safemode', 'safe mode']),\n ('str', ['str', 'steps to reproduce', 'repro steps']),\n ('uaf', ['uaf', 'use after free', 'use-after-free']),\n ('asan', ['asan', 'address sanitizer', 'addresssanitizer']),\n ('permafailure', ['permafailure', 'permafailing', 'permafail', 'perma failure', 'perma failing', 'perma fail', 'perma-failure', 'perma-failing', 'perma-fail']),\n ('spec', ['spec', 'specification']),\n ]\n\n for synonym_group, synonym_list in synonyms:\n text = re.sub('|'.join(fr'\\b{synonym}\\b' for synonym in synonym_list), synonym_group, text, flags=re.IGNORECASE)\n\n return text\n\n\ndef cleanup_crash(text):\n return re.sub(r'bp-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{6}[0-9]{6}\\b', '__CRASH_STATS_LINK__', text)\n\n\nclass BugExtractor(BaseEstimator, TransformerMixin):\n def __init__(self, feature_extractors, cleanup_functions, rollback=False, rollback_when=None, commit_data=False):\n self.feature_extractors = feature_extractors\n self.cleanup_functions = cleanup_functions\n self.rollback = rollback\n self.rollback_when = rollback_when\n self.commit_map = repository.get_commit_map() if commit_data else None\n\n def fit(self, x, y=None):\n return self\n\n def transform(self, bugs):\n results = 
[]\n\n for bug in bugs:\n bug_id = bug['id']\n\n if self.rollback:\n bug = bug_snapshot.rollback(bug, self.rollback_when)\n\n data = {}\n\n if self.commit_map is not None:\n if bug_id in self.commit_map:\n bug['commits'] = self.commit_map[bug_id]\n else:\n bug['commits'] = []\n\n for f in self.feature_extractors:\n res = f(bug)\n\n if res is None:\n continue\n\n if isinstance(res, list):\n for item in res:\n data[f.__class__.__name__ + '-' + item] = 'True'\n continue\n\n if isinstance(res, bool):\n res = str(res)\n\n data[f.__class__.__name__] = res\n\n # TODO: Try simply using all possible fields instead of extracting features manually.\n\n for cleanup_function in self.cleanup_functions:\n bug['summary'] = cleanup_function(bug['summary'])\n for c in bug['comments']:\n c['text'] = cleanup_function(c['text'])\n\n result = {\n 'data': data,\n 'title': bug['summary'],\n 'first_comment': bug['comments'][0]['text'],\n 'comments': ' '.join([c['text'] for c in bug['comments']]),\n }\n\n results.append(result)\n\n return pd.DataFrame(results)\n", "path": "bugbug/bug_features.py"}]}
| 3,394 | 173 |
gh_patches_debug_7083
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-5222
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CKV_GIT_4 always fail with terraform_plan
**Describe the issue**
Once a GitHub org/repo secret is created and stored in the terraform state, the check CKV_GIT_4 will always fail when scanning terraform plans even if the value was created using an encrypted value.
It seems like the check considers `"plaintext_value": ""` to be a hard-coded secret, because if I remove that line from the plan or change it to `"plaintext_value": null`, the check passes.
```
"resources": [
{
"address": "github_actions_organization_secret.my_github_secret",
"mode": "managed",
"type": "github_actions_organization_secret",
"name": "my_github_secret",
"provider_name": "registry.terraform.io/integrations/github",
"schema_version": 0,
"values": {
"created_at": "2023-05-17 13:54:59 +0000 UTC",
"encrypted_value": "MIr5c6eSzTJeGW/uyB0u...",
"id": "MY_GITHUB_SECRET",
"plaintext_value": "",
"secret_name": "MY_GITHUB_SECRET",
"selected_repository_ids": [],
"updated_at": "2023-05-17 13:54:59 +0000 UTC",
"visibility": "all"
},
"sensitive_values": {
"selected_repository_ids": []
}
}
```
**Examples**
**Version (please complete the following information):**
- Checkov Version 2.3.223
**Additional context**
Add any other context about the problem here.
</issue>
<code>
[start of checkov/terraform/checks/resource/github/SecretsEncrypted.py]
1 from typing import List, Any, Dict
2
3 from checkov.common.models.enums import CheckCategories, CheckResult
4 from checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck
5 from checkov.common.models.consts import ANY_VALUE
6
7
8 class SecretsEncrypted(BaseResourceNegativeValueCheck):
9 def __init__(self) -> None:
10 # -from github docs "It is also advised that you do not store plaintext values in your code but rather populate
11 # the encrypted_value using fields from a resource, data source or variable as,
12 # while encrypted in state, these will be easily accessible in your code"
13 name = "Ensure GitHub Actions secrets are encrypted"
14 id = "CKV_GIT_4"
15 supported_resources = (
16 "github_actions_environment_secret",
17 "github_actions_organization_secret",
18 "github_actions_secret",
19 )
20 categories = (CheckCategories.ENCRYPTION,)
21 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
22
23 def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
24 plaintext = conf.get("plaintext_value")
25 if plaintext and self._is_variable_dependant(plaintext[0]):
26 return CheckResult.UNKNOWN
27
28 return super().scan_resource_conf(conf)
29
30 def get_inspected_key(self) -> str:
31 return "plaintext_value"
32
33 def get_forbidden_values(self) -> List[Any]:
34 return [ANY_VALUE]
35
36
37 check = SecretsEncrypted()
38
[end of checkov/terraform/checks/resource/github/SecretsEncrypted.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/checkov/terraform/checks/resource/github/SecretsEncrypted.py b/checkov/terraform/checks/resource/github/SecretsEncrypted.py
--- a/checkov/terraform/checks/resource/github/SecretsEncrypted.py
+++ b/checkov/terraform/checks/resource/github/SecretsEncrypted.py
@@ -25,6 +25,10 @@
if plaintext and self._is_variable_dependant(plaintext[0]):
return CheckResult.UNKNOWN
+ if isinstance(plaintext, list) and not plaintext[0]:
+ # this happens mainly in TF plan files, because the value is just an empty string
+ return CheckResult.PASSED
+
return super().scan_resource_conf(conf)
def get_inspected_key(self) -> str:
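
The fix adds an early `PASSED` branch for the empty-string value that Terraform plan files keep for a secret whose value was supplied as `encrypted_value`. The sketch below is not checkov's implementation; it only mirrors the decision order of `scan_resource_conf` after the patch, with a crude stand-in for `_is_variable_dependant`, so the inputs from the report are easy to trace.

```python
from enum import Enum

class CheckResult(Enum):
    PASSED = "PASSED"
    FAILED = "FAILED"
    UNKNOWN = "UNKNOWN"

def scan_plaintext_value(conf):
    """Illustrative mirror of SecretsEncrypted.scan_resource_conf after the fix.
    checkov hands the check a dict whose values are lists."""
    plaintext = conf.get("plaintext_value")
    if plaintext and str(plaintext[0]).startswith("${"):
        # crude stand-in for self._is_variable_dependant(plaintext[0])
        return CheckResult.UNKNOWN
    if isinstance(plaintext, list) and not plaintext[0]:
        # the empty string a TF plan leaves behind -> not a hard-coded secret
        return CheckResult.PASSED
    if plaintext:
        # any concrete value counts as hard-coded (ANY_VALUE is forbidden)
        return CheckResult.FAILED
    # attribute absent altogether; treated as passing here, matching the
    # base negative-value check's default for a missing attribute
    return CheckResult.PASSED

print(scan_plaintext_value({"plaintext_value": [""]}))             # PASSED (the reported case)
print(scan_plaintext_value({"plaintext_value": ["hunter2"]}))      # FAILED
print(scan_plaintext_value({"encrypted_value": ["MIr5c6eS..."]}))  # PASSED
```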
|
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/github/SecretsEncrypted.py b/checkov/terraform/checks/resource/github/SecretsEncrypted.py\n--- a/checkov/terraform/checks/resource/github/SecretsEncrypted.py\n+++ b/checkov/terraform/checks/resource/github/SecretsEncrypted.py\n@@ -25,6 +25,10 @@\n if plaintext and self._is_variable_dependant(plaintext[0]):\n return CheckResult.UNKNOWN\n \n+ if isinstance(plaintext, list) and not plaintext[0]:\n+ # this happens mainly in TF plan files, because the value is just an empty string\n+ return CheckResult.PASSED\n+\n return super().scan_resource_conf(conf)\n \n def get_inspected_key(self) -> str:\n", "issue": "CKV_GIT_4 always fail with terraform_plan\n**Describe the issue**\r\nOnce a GitHub org/repo secret is created and stored in the terraform state, the check CKV_GIT_4 will always fail when scanning terraform plans even if the value was created using an encrypted value.\r\n\r\nIt seems like the check consider `\"plaintext_text\": \"\"` to be a hard-coded secret because if I remove that line from the plan or change it to `\"plaintext_text\": null`, the check passes.\r\n\r\n```\r\n \"resources\": [\r\n {\r\n \"address\": \"github_actions_organization_secret.my_github_secret\",\r\n \"mode\": \"managed\",\r\n \"type\": \"github_actions_organization_secret\",\r\n \"name\": \"my_github_secret\",\r\n \"provider_name\": \"registry.terraform.io/integrations/github\",\r\n \"schema_version\": 0,\r\n \"values\": {\r\n \"created_at\": \"2023-05-17 13:54:59 +0000 UTC\",\r\n \"encrypted_value\": \"MIr5c6eSzTJeGW/uyB0u...\",\r\n \"id\": \"MY_GITHUB_SECRET\",\r\n \"plaintext_value\": \"\",\r\n \"secret_name\": \"MY_GITHUB_SECRET\",\r\n \"selected_repository_ids\": [],\r\n \"updated_at\": \"2023-05-17 13:54:59 +0000 UTC\",\r\n \"visibility\": \"all\"\r\n },\r\n \"sensitive_values\": {\r\n \"selected_repository_ids\": []\r\n }\r\n }\r\n```\r\n\r\n**Examples**\r\n\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.3.223\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "from typing import List, Any, Dict\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck\nfrom checkov.common.models.consts import ANY_VALUE\n\n\nclass SecretsEncrypted(BaseResourceNegativeValueCheck):\n def __init__(self) -> None:\n # -from github docs \"It is also advised that you do not store plaintext values in your code but rather populate\n # the encrypted_value using fields from a resource, data source or variable as,\n # while encrypted in state, these will be easily accessible in your code\"\n name = \"Ensure GitHub Actions secrets are encrypted\"\n id = \"CKV_GIT_4\"\n supported_resources = (\n \"github_actions_environment_secret\",\n \"github_actions_organization_secret\",\n \"github_actions_secret\",\n )\n categories = (CheckCategories.ENCRYPTION,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n plaintext = conf.get(\"plaintext_value\")\n if plaintext and self._is_variable_dependant(plaintext[0]):\n return CheckResult.UNKNOWN\n\n return super().scan_resource_conf(conf)\n\n def get_inspected_key(self) -> str:\n return \"plaintext_value\"\n\n def get_forbidden_values(self) -> List[Any]:\n return 
[ANY_VALUE]\n\n\ncheck = SecretsEncrypted()\n", "path": "checkov/terraform/checks/resource/github/SecretsEncrypted.py"}]}
| 1,315 | 168 |
gh_patches_debug_4713
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-210
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Secure critical Django cookies
The following can be configured in [`settings.py`](https://github.com/cal-itp/benefits/blob/dev/benefits/settings.py).
Setting these ensures the cookies expire and are deleted when the user's browser closes (makes them session cookies)
* [x] [`CSRF_COOKIE_AGE`](https://docs.djangoproject.com/en/3.2/ref/settings/#csrf-cookie-age) = None
* [x] ~[`SESSION_COOKIE_AGE`](https://docs.djangoproject.com/en/3.2/ref/settings/#session-cookie-age) = None~
* [x] [`SESSION_EXPIRE_AT_BROWSER_CLOSE `](https://docs.djangoproject.com/en/3.2/ref/settings/#session-cookie-age) = True
We also want to prevent the CSRF cookie from being sent in cross-browser requests. E.g. a user clicks on a link to our site, from another site - in this case we don't want any previous CSRF cookie already in their browser sent. It's unlikely to be an issue in our app since there are no user logins etc. but for consistency with our other cookies (session and language).
* [x] [`CSRF_COOKIE_SAMESITE`](https://docs.djangoproject.com/en/3.2/ref/settings/#csrf-cookie-samesite) = "Strict"
</issue>
<code>
[start of benefits/settings.py]
1 """
2 Django settings for benefits project.
3 """
4 import os
5
6 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
7 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
8
9 # SECURITY WARNING: keep the secret key used in production secret!
10 SECRET_KEY = os.environ["DJANGO_SECRET_KEY"]
11
12 # SECURITY WARNING: don't run with debug turned on in production!
13 DEBUG = os.environ.get("DJANGO_DEBUG", "False").lower() == "true"
14
15 ADMIN = os.environ.get("DJANGO_ADMIN", "False").lower() == "true"
16
17 ALLOWED_HOSTS = []
18
19 if DEBUG:
20 ALLOWED_HOSTS.extend(["*"])
21 else:
22 hosts = os.environ["DJANGO_ALLOWED_HOSTS"].split(",")
23 ALLOWED_HOSTS.extend(hosts)
24
25 # Application definition
26
27 INSTALLED_APPS = [
28 "django.contrib.sessions",
29 "django.contrib.staticfiles",
30 "benefits.core",
31 "benefits.enrollment",
32 "benefits.eligibility",
33 ]
34
35 if ADMIN:
36 INSTALLED_APPS.extend(
37 [
38 "django.contrib.admin",
39 "django.contrib.auth",
40 "django.contrib.contenttypes",
41 "django.contrib.messages",
42 ]
43 )
44
45 MIDDLEWARE = [
46 "django.middleware.security.SecurityMiddleware",
47 "django.contrib.sessions.middleware.SessionMiddleware",
48 "django.middleware.locale.LocaleMiddleware",
49 "benefits.core.middleware.Healthcheck",
50 "django.middleware.common.CommonMiddleware",
51 "django.middleware.csrf.CsrfViewMiddleware",
52 "django.middleware.clickjacking.XFrameOptionsMiddleware",
53 "benefits.core.middleware.DebugSession",
54 "benefits.core.middleware.ChangedLanguageEvent",
55 ]
56
57 if ADMIN:
58 MIDDLEWARE.extend(
59 [
60 "django.contrib.auth.middleware.AuthenticationMiddleware",
61 "django.contrib.messages.middleware.MessageMiddleware",
62 ]
63 )
64
65 CSRF_COOKIE_HTTPONLY = True
66
67 SESSION_COOKIE_AGE = 3600
68 SESSION_COOKIE_SAMESITE = "Strict"
69 SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
70
71 if not DEBUG:
72 CSRF_COOKIE_SECURE = True
73 CSRF_FAILURE_VIEW = "benefits.core.views.csrf_failure"
74 SESSION_COOKIE_SECURE = True
75
76 ROOT_URLCONF = "benefits.urls"
77
78 template_ctx_processors = [
79 "django.template.context_processors.request",
80 "benefits.core.context_processors.analytics",
81 ]
82
83 if DEBUG:
84 template_ctx_processors.extend(
85 [
86 "django.template.context_processors.debug",
87 "benefits.core.context_processors.debug",
88 ]
89 )
90
91 if ADMIN:
92 template_ctx_processors.extend(
93 [
94 "django.contrib.auth.context_processors.auth",
95 "django.contrib.messages.context_processors.messages",
96 ]
97 )
98
99 TEMPLATES = [
100 {
101 "BACKEND": "django.template.backends.django.DjangoTemplates",
102 "DIRS": [os.path.join(BASE_DIR, "benefits", "templates")],
103 "APP_DIRS": True,
104 "OPTIONS": {
105 "context_processors": template_ctx_processors,
106 },
107 },
108 ]
109
110 WSGI_APPLICATION = "benefits.wsgi.application"
111
112 DATABASES = {
113 "default": {
114 "ENGINE": "django.db.backends.sqlite3",
115 "NAME": os.environ.get("DJANGO_DB", "django") + ".db",
116 }
117 }
118
119 # Password validation
120
121 AUTH_PASSWORD_VALIDATORS = []
122
123 if ADMIN:
124 AUTH_PASSWORD_VALIDATORS.extend(
125 [
126 {
127 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
128 },
129 {
130 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
131 },
132 {
133 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
134 },
135 {
136 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
137 },
138 ]
139 )
140
141 # Internationalization
142
143 LANGUAGE_CODE = "en"
144
145 LANGUAGES = [("en", "English"), ("es", "Español")]
146
147 LOCALE_PATHS = [os.path.join(BASE_DIR, "benefits", "locale")]
148
149 USE_I18N = True
150 USE_L10N = True
151
152 TIME_ZONE = "UTC"
153 USE_TZ = True
154
155 # Static files (CSS, JavaScript, Images)
156
157 STATIC_URL = "/static/"
158 STATICFILES_DIRS = [os.path.join(BASE_DIR, "benefits", "static")]
159 STATIC_ROOT = os.path.join(BASE_DIR, "static")
160
161 # Logging configuration
162
163 LOG_LEVEL = os.environ.get("DJANGO_LOG_LEVEL", "DEBUG" if DEBUG else "WARNING")
164 LOGGING = {
165 "version": 1,
166 "disable_existing_loggers": False,
167 "formatters": {
168 "default": {
169 "format": "[{asctime}] {levelname} {name}:{lineno} {message}",
170 "datefmt": "%d/%b/%Y %H:%M:%S",
171 "style": "{",
172 },
173 },
174 "handlers": {
175 "default": {"class": "logging.StreamHandler", "formatter": "default"},
176 },
177 "root": {
178 "handlers": ["default"],
179 "level": LOG_LEVEL,
180 },
181 "loggers": {"django": {"handlers": ["default"], "propagate": False}},
182 }
183
184 # Analytics configuration
185
186 ANALYTICS_KEY = os.environ.get("ANALYTICS_KEY")
187
[end of benefits/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/benefits/settings.py b/benefits/settings.py
--- a/benefits/settings.py
+++ b/benefits/settings.py
@@ -62,11 +62,13 @@
]
)
+CSRF_COOKIE_AGE = None
+CSRF_COOKIE_SAMESITE = "Strict"
CSRF_COOKIE_HTTPONLY = True
-SESSION_COOKIE_AGE = 3600
SESSION_COOKIE_SAMESITE = "Strict"
SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
+SESSION_EXPIRE_AT_BROWSER_CLOSE = True
if not DEBUG:
CSRF_COOKIE_SECURE = True
|
{"golden_diff": "diff --git a/benefits/settings.py b/benefits/settings.py\n--- a/benefits/settings.py\n+++ b/benefits/settings.py\n@@ -62,11 +62,13 @@\n ]\n )\n \n+CSRF_COOKIE_AGE = None\n+CSRF_COOKIE_SAMESITE = \"Strict\"\n CSRF_COOKIE_HTTPONLY = True\n \n-SESSION_COOKIE_AGE = 3600\n SESSION_COOKIE_SAMESITE = \"Strict\"\n SESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\n+SESSION_EXPIRE_AT_BROWSER_CLOSE = True\n \n if not DEBUG:\n CSRF_COOKIE_SECURE = True\n", "issue": "Secure critical Django cookies\nThe following can be configured in [`settings.py`](https://github.com/cal-itp/benefits/blob/dev/benefits/settings.py).\r\n\r\nSetting these ensures the cookies expire and are deleted when the user's browser closes (makes them session cookies)\r\n\r\n* [x] [`CSRF_COOKIE_AGE`](https://docs.djangoproject.com/en/3.2/ref/settings/#csrf-cookie-age) = None\r\n* [x] ~[`SESSION_COOKIE_AGE`](https://docs.djangoproject.com/en/3.2/ref/settings/#session-cookie-age) = None~\r\n* [x] [`SESSION_EXPIRE_AT_BROWSER_CLOSE `](https://docs.djangoproject.com/en/3.2/ref/settings/#session-cookie-age) = True\r\n\r\nWe also want to prevent the CSRF cookie from being sent in cross-browser requests. E.g. a user clicks on a link to our site, from another site - in this case we don't want any previous CSRF cookie already in their browser sent. It's unlikely to be an issue in our app since there are no user logins etc. but for consistency with our other cookies (session and language).\r\n\r\n* [x] [`CSRF_COOKIE_SAMESITE`](https://docs.djangoproject.com/en/3.2/ref/settings/#csrf-cookie-samesite) = \"Strict\"\n", "before_files": [{"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = []\n\nif DEBUG:\n ALLOWED_HOSTS.extend([\"*\"])\nelse:\n hosts = os.environ[\"DJANGO_ALLOWED_HOSTS\"].split(\",\")\n ALLOWED_HOSTS.extend(hosts)\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.messages\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"benefits.core.middleware.Healthcheck\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"benefits.core.middleware.DebugSession\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nCSRF_COOKIE_HTTPONLY = True\n\nSESSION_COOKIE_AGE = 3600\nSESSION_COOKIE_SAMESITE = \"Strict\"\nSESSION_ENGINE = 
\"django.contrib.sessions.backends.signed_cookies\"\n\nif not DEBUG:\n CSRF_COOKIE_SECURE = True\n CSRF_FAILURE_VIEW = \"benefits.core.views.csrf_failure\"\n SESSION_COOKIE_SECURE = True\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"benefits.core.context_processors.analytics\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.environ.get(\"DJANGO_DB\", \"django\") + \".db\",\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n )\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n# Logging configuration\n\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"format\": \"[{asctime}] {levelname} {name}:{lineno} {message}\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"default\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"default\"},\n },\n \"root\": {\n \"handlers\": [\"default\"],\n \"level\": LOG_LEVEL,\n },\n \"loggers\": {\"django\": {\"handlers\": [\"default\"], \"propagate\": False}},\n}\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n", "path": "benefits/settings.py"}]}
| 2,358 | 132 |
gh_patches_debug_10094
|
rasdani/github-patches
|
git_diff
|
linz__geostore-790
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve metadata description for catalog metadata
### User Story
<!-- A user story to describe why a user wants to do something, who the user is and what they want to do -->
So that I can understand more about the catalog metadata for the Geostore, as an external data analyst or developer, I want a good description and other relevant metadata in the catalog.json files.
<!-- optional: Instead of [existing behaviour] -->
#### Acceptance Criteria
<!-- Required artifacts to accept this feature as completed. -->
- [x] Given the bucket is deployed, when a dataset is created, then the title should read "LINZ Geostore"
- [x] Given the bucket is deployed, when a dataset is created, then the description should read:
`The Geospatial Data Store (Geostore) contains all the important geospatial data and metadata held by Land Information New Zealand (LINZ).<br/>Please browse this catalog to find and access our data.<br/>For more information see: TODO - add URL to help page.`
- [ ] Given a dataset, when a new version is created, the description of the dataset 'catalog.json' should be updated from the collection.json description
- [ ] Given a dataset, when a new version is created, the title in the dataset 'catalog.json' should be updated to the title from the collection.json
#### Additional context
Note: see old data lake root catalog for ideas. See other nicely described catalogs on the web for ideas (AWS Open Data Registry for example).
<!-- Add any other context or mocked CLI commands or screenshots about the feature request here.-->
#### Tasks
<!-- Tasks needed to complete this enabler -->
- [ ] Possibly want to change the dataset endpoint's 'title' to 'directory-name' (or something), and update documentation (Confluence as well as GitHub) as well
- [ ]
#### Definition of Ready
- [ ] This story is **ready** to work on, according to the
[team's definition](https://confluence.linz.govt.nz/pages/viewpage.action?pageId=87930423)
#### Definition of Done
- [ ] This story is **done**, according to the
[team's definition](https://confluence.linz.govt.nz/pages/viewpage.action?pageId=87930423)
</issue>
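A minimal sketch of root-catalog constants that would satisfy the first two acceptance criteria above (the strings are copied from the criteria; the help-page URL is still a placeholder, so the final wording is an assumption, not the shipped implementation):

```python
# Hypothetical values for the constants in backend/populate_catalog/task.py,
# mirroring the acceptance criteria above; not necessarily the shipped wording.
ROOT_CATALOG_TITLE = "LINZ Geostore"
ROOT_CATALOG_DESCRIPTION = (
    "The Geospatial Data Store (Geostore) contains all the important "
    "geospatial data and metadata held by Land Information New Zealand "
    "(LINZ).<br/>Please browse this catalog to find and access our data."
    "<br/>For more information see: TODO - add URL to help page."
)
```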
<code>
[start of backend/populate_catalog/task.py]
1 from json import dumps
2 from typing import TYPE_CHECKING
3
4 import boto3
5 from pystac import STAC_IO, Catalog, CatalogType, Collection, Item # type: ignore[import]
6 from pystac.layout import HrefLayoutStrategy # type: ignore[import]
7
8 from ..api_keys import EVENT_KEY
9 from ..api_responses import BODY_KEY
10 from ..log import set_up_logging
11 from ..pystac_io_methods import read_method, write_method
12 from ..resources import ResourceName
13 from ..s3 import S3_URL_PREFIX
14 from ..sqs_message_attributes import (
15 MESSAGE_ATTRIBUTE_TYPE_DATASET,
16 MESSAGE_ATTRIBUTE_TYPE_KEY,
17 MESSAGE_ATTRIBUTE_TYPE_ROOT,
18 STRING_VALUE_KEY_LOWER,
19 )
20 from ..types import JsonObject
21
22 if TYPE_CHECKING:
23 # When type checking we want to use the third party package's stub
24 from mypy_boto3_s3 import S3Client
25 else:
26 # In production we want to avoid depending on a package which has no runtime impact
27 S3Client = object
28
29 STAC_IO.write_text_method = write_method
30 STAC_IO.read_text_method = read_method
31
32 S3_CLIENT: S3Client = boto3.client("s3")
33
34 ROOT_CATALOG_ID = "root_catalog"
35 ROOT_CATALOG_TITLE = "Geostore Root Catalog"
36 ROOT_CATALOG_DESCRIPTION = "The root catalog which links to all dataset catalogues in Geostore"
37 CATALOG_KEY = "catalog.json"
38 CONTENTS_KEY = "Contents"
39 RECORDS_KEY = "Records"
40 MESSAGE_ATTRIBUTES_KEY = "messageAttributes"
41
42 LOGGER = set_up_logging(__name__)
43
44
45 def lambda_handler(event: JsonObject, _context: bytes) -> JsonObject:
46 """Main Lambda entry point."""
47
48 LOGGER.debug(dumps({EVENT_KEY: event}))
49
50 for message in event[RECORDS_KEY]:
51 if (
52 message[MESSAGE_ATTRIBUTES_KEY][MESSAGE_ATTRIBUTE_TYPE_KEY][STRING_VALUE_KEY_LOWER]
53 == MESSAGE_ATTRIBUTE_TYPE_ROOT
54 ):
55 handle_root(message[BODY_KEY])
56 elif (
57 message[MESSAGE_ATTRIBUTES_KEY][MESSAGE_ATTRIBUTE_TYPE_KEY][STRING_VALUE_KEY_LOWER]
58 == MESSAGE_ATTRIBUTE_TYPE_DATASET
59 ):
60 handle_dataset(message[BODY_KEY])
61 else:
62 raise UnhandledSQSMessageException("Unhandled SQS message type")
63
64 return {}
65
66
67 class UnhandledSQSMessageException(Exception):
68 pass
69
70
71 class GeostoreSTACLayoutStrategy(HrefLayoutStrategy):
72 def get_catalog_href(self, cat: Catalog, parent_dir: str, is_root: bool) -> str:
73 return str(cat.get_self_href())
74
75 def get_collection_href(self, col: Collection, parent_dir: str, is_root: bool) -> str:
76 assert not is_root
77 return str(col.get_self_href())
78
79 def get_item_href(self, item: Item, parent_dir: str) -> str:
80 return str(item.get_self_href())
81
82
83 def handle_dataset(version_metadata_key: str) -> None:
84 """Handle writing a new dataset version to the dataset catalog"""
85 storage_bucket_path = f"{S3_URL_PREFIX}{ResourceName.STORAGE_BUCKET_NAME.value}"
86 dataset_prefix = version_metadata_key.split("/", maxsplit=1)[0]
87 dataset_catalog = Catalog.from_file(f"{storage_bucket_path}/{dataset_prefix}/{CATALOG_KEY}")
88
89 dataset_version_metadata = STAC_IO.read_stac_object(
90 f"{storage_bucket_path}/{version_metadata_key}"
91 )
92
93 dataset_catalog.add_child(dataset_version_metadata, strategy=GeostoreSTACLayoutStrategy())
94
95 dataset_catalog.normalize_hrefs(
96 f"{storage_bucket_path}/{dataset_prefix}", strategy=GeostoreSTACLayoutStrategy()
97 )
98 dataset_catalog.save(catalog_type=CatalogType.SELF_CONTAINED)
99
100
101 def handle_root(dataset_prefix: str) -> None:
102 """Handle writing a new dataset to the root catalog"""
103 results = S3_CLIENT.list_objects(
104 Bucket=ResourceName.STORAGE_BUCKET_NAME.value, Prefix=CATALOG_KEY
105 )
106
107 # create root catalog if it doesn't exist
108 if CONTENTS_KEY in results:
109 root_catalog = Catalog.from_file(
110 f"{S3_URL_PREFIX}{ResourceName.STORAGE_BUCKET_NAME.value}/{CATALOG_KEY}"
111 )
112
113 else:
114 root_catalog = Catalog(
115 id=ROOT_CATALOG_ID,
116 title=ROOT_CATALOG_TITLE,
117 description=ROOT_CATALOG_DESCRIPTION,
118 catalog_type=CatalogType.SELF_CONTAINED,
119 )
120 root_catalog.set_self_href(
121 f"{S3_URL_PREFIX}{ResourceName.STORAGE_BUCKET_NAME.value}/{CATALOG_KEY}"
122 )
123
124 dataset_path = f"{S3_URL_PREFIX}{ResourceName.STORAGE_BUCKET_NAME.value}/{dataset_prefix}"
125 dataset_catalog = Catalog.from_file(f"{dataset_path}/{CATALOG_KEY}")
126
127 root_catalog.add_child(dataset_catalog, strategy=GeostoreSTACLayoutStrategy())
128 root_catalog.normalize_hrefs(
129 f"{S3_URL_PREFIX}{ResourceName.STORAGE_BUCKET_NAME.value}",
130 strategy=GeostoreSTACLayoutStrategy(),
131 )
132
133 root_catalog.save(catalog_type=CatalogType.SELF_CONTAINED)
134
[end of backend/populate_catalog/task.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/backend/populate_catalog/task.py b/backend/populate_catalog/task.py
--- a/backend/populate_catalog/task.py
+++ b/backend/populate_catalog/task.py
@@ -32,8 +32,12 @@
S3_CLIENT: S3Client = boto3.client("s3")
ROOT_CATALOG_ID = "root_catalog"
-ROOT_CATALOG_TITLE = "Geostore Root Catalog"
-ROOT_CATALOG_DESCRIPTION = "The root catalog which links to all dataset catalogues in Geostore"
+ROOT_CATALOG_TITLE = "LINZ Geostore"
+ROOT_CATALOG_DESCRIPTION = (
+ "The LINZ Geospatial Data Store (Geostore) contains all the important "
+ "geospatial data held by Land Information New Zealand (LINZ).<br/>"
+ "Please browse this catalog to find and access our data."
+)
CATALOG_KEY = "catalog.json"
CONTENTS_KEY = "Contents"
RECORDS_KEY = "Records"
|
{"golden_diff": "diff --git a/backend/populate_catalog/task.py b/backend/populate_catalog/task.py\n--- a/backend/populate_catalog/task.py\n+++ b/backend/populate_catalog/task.py\n@@ -32,8 +32,12 @@\n S3_CLIENT: S3Client = boto3.client(\"s3\")\n \n ROOT_CATALOG_ID = \"root_catalog\"\n-ROOT_CATALOG_TITLE = \"Geostore Root Catalog\"\n-ROOT_CATALOG_DESCRIPTION = \"The root catalog which links to all dataset catalogues in Geostore\"\n+ROOT_CATALOG_TITLE = \"LINZ Geostore\"\n+ROOT_CATALOG_DESCRIPTION = (\n+ \"The LINZ Geospatial Data Store (Geostore) contains all the important \"\n+ \"geospatial data held by Land Information New Zealand (LINZ).<br/>\"\n+ \"Please browse this catalog to find and access our data.\"\n+)\n CATALOG_KEY = \"catalog.json\"\n CONTENTS_KEY = \"Contents\"\n RECORDS_KEY = \"Records\"\n", "issue": "Improve metadata description for catalog metadata\n### User Story\n\n<!-- A user story to describe why a user wants to do something, who the user is and what they want to do -->\n\nSo that I can understand more about the catalog metadata for the Geostore, as an external data analyst or developer, I want a good description and other relevant metadata in the catalog.json files.\n\n<!-- optional: Instead of [existing behaviour] -->\n\n#### Acceptance Criteria\n\n<!-- Required artifacts to accept this feature as completed. -->\n\n- [x] Given the bucket is deployed, when a dataset is created, then the title should read \"LINZ Geostore\"\n- [x] Given the bucket is deployed, when a dataset is created, then the description should read:\n`The Geospatial Data Store (Geostore) contains all the important geospatial data and metadata held by Land Information New Zealand (LINZ).<br/>Please browse this catalog to find and access our data.<br/>For more information see: TODO - add URL to help page.`\n- [ ] Given a dataset, when a new version is created, the description of the dataset 'catalog.json' should be updated from the collection.json description\n- [ ] Given a dataset, when a new version is created, the title in the dataset 'catalog.json' should be updated to the title from the collection.json\n\n#### Additional context\n\nNote: see old data lake root catalog for ideas. 
See other nicely described catalogs on the web for ideas (AWS Open Data Registry for example).\n\n<!-- Add any other context or mocked CLI commands or screenshots about the feature request here.-->\n\n#### Tasks\n\n<!-- Tasks needed to complete this enabler -->\n\n- [ ] Possibly want to change dataset endpoint change 'title' to 'directory-name' (or something), update documentation (Confluence as well as Github) as well\n- [ ] \n\n#### Definition of Ready\n\n- [ ] This story is **ready** to work on, according to the\n [team's definition](https://confluence.linz.govt.nz/pages/viewpage.action?pageId=87930423)\n\n#### Definition of Done\n\n- [ ] This story is **done**, according to the\n [team's definition](https://confluence.linz.govt.nz/pages/viewpage.action?pageId=87930423)\n\n", "before_files": [{"content": "from json import dumps\nfrom typing import TYPE_CHECKING\n\nimport boto3\nfrom pystac import STAC_IO, Catalog, CatalogType, Collection, Item # type: ignore[import]\nfrom pystac.layout import HrefLayoutStrategy # type: ignore[import]\n\nfrom ..api_keys import EVENT_KEY\nfrom ..api_responses import BODY_KEY\nfrom ..log import set_up_logging\nfrom ..pystac_io_methods import read_method, write_method\nfrom ..resources import ResourceName\nfrom ..s3 import S3_URL_PREFIX\nfrom ..sqs_message_attributes import (\n MESSAGE_ATTRIBUTE_TYPE_DATASET,\n MESSAGE_ATTRIBUTE_TYPE_KEY,\n MESSAGE_ATTRIBUTE_TYPE_ROOT,\n STRING_VALUE_KEY_LOWER,\n)\nfrom ..types import JsonObject\n\nif TYPE_CHECKING:\n # When type checking we want to use the third party package's stub\n from mypy_boto3_s3 import S3Client\nelse:\n # In production we want to avoid depending on a package which has no runtime impact\n S3Client = object\n\nSTAC_IO.write_text_method = write_method\nSTAC_IO.read_text_method = read_method\n\nS3_CLIENT: S3Client = boto3.client(\"s3\")\n\nROOT_CATALOG_ID = \"root_catalog\"\nROOT_CATALOG_TITLE = \"Geostore Root Catalog\"\nROOT_CATALOG_DESCRIPTION = \"The root catalog which links to all dataset catalogues in Geostore\"\nCATALOG_KEY = \"catalog.json\"\nCONTENTS_KEY = \"Contents\"\nRECORDS_KEY = \"Records\"\nMESSAGE_ATTRIBUTES_KEY = \"messageAttributes\"\n\nLOGGER = set_up_logging(__name__)\n\n\ndef lambda_handler(event: JsonObject, _context: bytes) -> JsonObject:\n \"\"\"Main Lambda entry point.\"\"\"\n\n LOGGER.debug(dumps({EVENT_KEY: event}))\n\n for message in event[RECORDS_KEY]:\n if (\n message[MESSAGE_ATTRIBUTES_KEY][MESSAGE_ATTRIBUTE_TYPE_KEY][STRING_VALUE_KEY_LOWER]\n == MESSAGE_ATTRIBUTE_TYPE_ROOT\n ):\n handle_root(message[BODY_KEY])\n elif (\n message[MESSAGE_ATTRIBUTES_KEY][MESSAGE_ATTRIBUTE_TYPE_KEY][STRING_VALUE_KEY_LOWER]\n == MESSAGE_ATTRIBUTE_TYPE_DATASET\n ):\n handle_dataset(message[BODY_KEY])\n else:\n raise UnhandledSQSMessageException(\"Unhandled SQS message type\")\n\n return {}\n\n\nclass UnhandledSQSMessageException(Exception):\n pass\n\n\nclass GeostoreSTACLayoutStrategy(HrefLayoutStrategy):\n def get_catalog_href(self, cat: Catalog, parent_dir: str, is_root: bool) -> str:\n return str(cat.get_self_href())\n\n def get_collection_href(self, col: Collection, parent_dir: str, is_root: bool) -> str:\n assert not is_root\n return str(col.get_self_href())\n\n def get_item_href(self, item: Item, parent_dir: str) -> str:\n return str(item.get_self_href())\n\n\ndef handle_dataset(version_metadata_key: str) -> None:\n \"\"\"Handle writing a new dataset version to the dataset catalog\"\"\"\n storage_bucket_path = f\"{S3_URL_PREFIX}{ResourceName.STORAGE_BUCKET_NAME.value}\"\n dataset_prefix 
= version_metadata_key.split(\"/\", maxsplit=1)[0]\n dataset_catalog = Catalog.from_file(f\"{storage_bucket_path}/{dataset_prefix}/{CATALOG_KEY}\")\n\n dataset_version_metadata = STAC_IO.read_stac_object(\n f\"{storage_bucket_path}/{version_metadata_key}\"\n )\n\n dataset_catalog.add_child(dataset_version_metadata, strategy=GeostoreSTACLayoutStrategy())\n\n dataset_catalog.normalize_hrefs(\n f\"{storage_bucket_path}/{dataset_prefix}\", strategy=GeostoreSTACLayoutStrategy()\n )\n dataset_catalog.save(catalog_type=CatalogType.SELF_CONTAINED)\n\n\ndef handle_root(dataset_prefix: str) -> None:\n \"\"\"Handle writing a new dataset to the root catalog\"\"\"\n results = S3_CLIENT.list_objects(\n Bucket=ResourceName.STORAGE_BUCKET_NAME.value, Prefix=CATALOG_KEY\n )\n\n # create root catalog if it doesn't exist\n if CONTENTS_KEY in results:\n root_catalog = Catalog.from_file(\n f\"{S3_URL_PREFIX}{ResourceName.STORAGE_BUCKET_NAME.value}/{CATALOG_KEY}\"\n )\n\n else:\n root_catalog = Catalog(\n id=ROOT_CATALOG_ID,\n title=ROOT_CATALOG_TITLE,\n description=ROOT_CATALOG_DESCRIPTION,\n catalog_type=CatalogType.SELF_CONTAINED,\n )\n root_catalog.set_self_href(\n f\"{S3_URL_PREFIX}{ResourceName.STORAGE_BUCKET_NAME.value}/{CATALOG_KEY}\"\n )\n\n dataset_path = f\"{S3_URL_PREFIX}{ResourceName.STORAGE_BUCKET_NAME.value}/{dataset_prefix}\"\n dataset_catalog = Catalog.from_file(f\"{dataset_path}/{CATALOG_KEY}\")\n\n root_catalog.add_child(dataset_catalog, strategy=GeostoreSTACLayoutStrategy())\n root_catalog.normalize_hrefs(\n f\"{S3_URL_PREFIX}{ResourceName.STORAGE_BUCKET_NAME.value}\",\n strategy=GeostoreSTACLayoutStrategy(),\n )\n\n root_catalog.save(catalog_type=CatalogType.SELF_CONTAINED)\n", "path": "backend/populate_catalog/task.py"}]}
| 2,398 | 209 |
gh_patches_debug_2251
|
rasdani/github-patches
|
git_diff
|
mirumee__ariadne-232
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update GraphQL Core Next & Starlette
Issue for me to remember to update our core dependencies to latest versions before release.
</issue>
<code>
[start of setup.py]
1 #! /usr/bin/env python
2 import os
3 from setuptools import setup
4
5 CLASSIFIERS = [
6 "Development Status :: 4 - Beta",
7 "Intended Audience :: Developers",
8 "License :: OSI Approved :: BSD License",
9 "Operating System :: OS Independent",
10 "Programming Language :: Python",
11 "Programming Language :: Python :: 3.6",
12 "Programming Language :: Python :: 3.7",
13 "Topic :: Software Development :: Libraries :: Python Modules",
14 ]
15
16 README_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md")
17 with open(README_PATH, "r") as f:
18 README = f.read()
19
20 setup(
21 name="ariadne",
22 author="Mirumee Software",
23 author_email="[email protected]",
24 description="Ariadne is a Python library for implementing GraphQL servers.",
25 long_description=README,
26 long_description_content_type="text/markdown",
27 license="BSD",
28 version="0.5.0",
29 url="https://github.com/mirumee/ariadne",
30 packages=["ariadne"],
31 include_package_data=True,
32 install_requires=[
33 "graphql-core-next>=1.0.4",
34 "python-multipart>=0.0.5",
35 "starlette<0.13",
36 "typing_extensions>=3.6.0",
37 ],
38 classifiers=CLASSIFIERS,
39 platforms=["any"],
40 zip_safe=False,
41 )
42
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,7 @@
packages=["ariadne"],
include_package_data=True,
install_requires=[
- "graphql-core-next>=1.0.4",
+ "graphql-core-next<3.0.0",
"python-multipart>=0.0.5",
"starlette<0.13",
"typing_extensions>=3.6.0",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,7 +30,7 @@\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n- \"graphql-core-next>=1.0.4\",\n+ \"graphql-core-next<3.0.0\",\n \"python-multipart>=0.0.5\",\n \"starlette<0.13\",\n \"typing_extensions>=3.6.0\",\n", "issue": "Update GraphQL Core Next & Starlette\nIssue for me to remember to update our core dependencies to latest versions before release.\n", "before_files": [{"content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.5.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core-next>=1.0.4\",\n \"python-multipart>=0.0.5\",\n \"starlette<0.13\",\n \"typing_extensions>=3.6.0\",\n ],\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n", "path": "setup.py"}]}
| 941 | 108 |
gh_patches_debug_12194
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-1687
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Elastic APM crashes when trying to inspect hits from queries while track_total_hits is False
**Describe the bug**:
The Elastic APM instrumentation for _Elasticsearch_ tries to get the total number of hits in the results of a query, but if **track_total_hits** is set to **False**, `result.body['hits']` does not have a **total** key.

It fails with a KeyError('total') at exactly this line: https://github.com/elastic/apm-agent-python/blob/8b22d6a910ceda6587b99e47d8b3c9fa95c8d490/elasticapm/instrumentation/packages/elasticsearch.py#L180
**To Reproduce**
1. Instrument your python code with ElasticAPM, you don't even need to have the APM server up it crashes while collecting the data
2. Run any search query with **track_total_hits** set to **False**
`client.search(params={'track_total_hits': False}, **query_body)`
**Environment (please complete the following information)**
- OS: Linux
- Python version: 3.9
- Framework and version [e.g. Django 2.1]: Flask
- APM Server version: Irrelevant
- Agent version: 6.13.0
**Additional context**
This will crash no matter the config. We've reverted our code to use ElasticAPM 6.12 to make it work again.
</issue>
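One possible defensive rewrite of the `_get_hits` helper referenced in the traceback (a sketch only, shown as a standalone function without `self`, using `dict.get` so an untracked total simply yields `None`; this is an illustration of the failure mode, not necessarily the fix that was released):

```python
from typing import Optional


def _get_hits(result) -> Optional[int]:
    # Use .get() so a missing "total" key (track_total_hits=False) returns
    # None instead of raising KeyError.
    body = getattr(result, "body", None)
    if body and "hits" in body:  # elasticsearch-py >= 8 response objects
        return body["hits"].get("total", {}).get("value")
    if isinstance(result, dict) and "hits" in result:
        total = result["hits"].get("total")
        if isinstance(total, dict):
            return total.get("value")
        return total  # older ES versions report a plain integer
    return None
```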
<code>
[start of elasticapm/instrumentation/packages/elasticsearch.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from __future__ import absolute_import
32
33 import re
34 from typing import Optional
35 from urllib.parse import parse_qs, urlparse
36
37 import elasticapm
38 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
39 from elasticapm.traces import DroppedSpan, execution_context
40 from elasticapm.utils.logging import get_logger
41
42 logger = get_logger("elasticapm.instrument")
43
44 should_capture_body_re = re.compile("/(_search|_msearch|_count|_async_search|_sql|_eql)(/|$)")
45
46
47 class ElasticsearchConnectionInstrumentation(AbstractInstrumentedModule):
48 name = "elasticsearch_connection"
49
50 def get_instrument_list(self):
51 try:
52 import elastic_transport # noqa: F401
53
54 return [
55 ("elastic_transport._node._http_urllib3", "Urllib3HttpNode.perform_request"),
56 ("elastic_transport._node._http_requests", "RequestsHttpNode.perform_request"),
57 ]
58 except ImportError:
59 return [
60 ("elasticsearch.connection.http_urllib3", "Urllib3HttpConnection.perform_request"),
61 ("elasticsearch.connection.http_requests", "RequestsHttpConnection.perform_request"),
62 ]
63
64 def call(self, module, method, wrapped, instance, args, kwargs):
65 span = execution_context.get_span()
66 if not span or isinstance(span, DroppedSpan):
67 return wrapped(*args, **kwargs)
68
69 self._update_context_by_request_data(span.context, instance, args, kwargs)
70
71 result = wrapped(*args, **kwargs)
72 if hasattr(result, "meta"): # elasticsearch-py 8.x+
73 status_code = result.meta.status
74 cluster = result.meta.headers.get("x-found-handling-cluster")
75 else:
76 status_code = result[0]
77 cluster = result[1].get("x-found-handling-cluster")
78 span.context["http"] = {"status_code": status_code}
79 if cluster:
80 span.context["db"] = {"instance": cluster}
81
82 return result
83
84 def _update_context_by_request_data(self, context, instance, args, kwargs):
85 args_len = len(args)
86 url = args[1] if args_len > 1 else kwargs.get("url")
87 params = args[2] if args_len > 2 else kwargs.get("params")
88 body_serialized = args[3] if args_len > 3 else kwargs.get("body")
89
90 if "?" in url and not params:
91 url, qs = url.split("?", 1)
92 params = {k: v[0] for k, v in parse_qs(qs).items()}
93
94 should_capture_body = bool(should_capture_body_re.search(url))
95
96 context["db"] = {"type": "elasticsearch"}
97 if should_capture_body:
98 query = []
99 # using both q AND body is allowed in some API endpoints / ES versions,
100 # but not in others. We simply capture both if they are there so the
101 # user can see it.
102 if params and "q" in params:
103 # 'q' may already be encoded to a byte string at this point.
104 # We assume utf8, which is the default
105 q = params["q"]
106 if isinstance(q, bytes):
107 q = q.decode("utf-8", errors="replace")
108 query.append("q=" + q)
109 if body_serialized:
110 if isinstance(body_serialized, bytes):
111 query.append(body_serialized.decode("utf-8", errors="replace"))
112 else:
113 query.append(body_serialized)
114 if query:
115 context["db"]["statement"] = "\n\n".join(query)
116
117 # ES5: `host` is URL, no `port` attribute
118 # ES6, ES7: `host` URL, `hostname` is host, `port` is port
119 # ES8: `host` is hostname, no `hostname` attribute, `port` is `port`
120 if not hasattr(instance, "port"):
121 # ES5, parse hostname and port from URL stored in `host`
122 parsed_url = urlparse(instance.host)
123 host = parsed_url.hostname
124 port = parsed_url.port
125 elif not hasattr(instance, "hostname"):
126 # ES8 (and up, one can hope)
127 host = instance.host
128 port = instance.port
129 else:
130 # ES6, ES7
131 host = instance.hostname
132 port = instance.port
133
134 context["destination"] = {"address": host, "port": port}
135
136
137 class ElasticsearchTransportInstrumentation(AbstractInstrumentedModule):
138 name = "elasticsearch_connection"
139
140 def get_instrument_list(self):
141 try:
142 import elastic_transport # noqa: F401
143
144 return [
145 ("elastic_transport", "Transport.perform_request"),
146 ]
147 except ImportError:
148 return [
149 ("elasticsearch.transport", "Transport.perform_request"),
150 ]
151
152 def call(self, module, method, wrapped, instance, args, kwargs):
153 with elasticapm.capture_span(
154 self._get_signature(args, kwargs),
155 span_type="db",
156 span_subtype="elasticsearch",
157 span_action="query",
158 extra={},
159 skip_frames=2,
160 leaf=True,
161 ) as span:
162 result_data = wrapped(*args, **kwargs)
163
164 hits = self._get_hits(result_data)
165 if hits:
166 span.context["db"]["rows_affected"] = hits
167
168 return result_data
169
170 def _get_signature(self, args, kwargs):
171 args_len = len(args)
172 http_method = args[0] if args_len else kwargs.get("method")
173 http_path = args[1] if args_len > 1 else kwargs.get("url")
174 http_path = http_path.split("?", 1)[0] # we don't want to capture a potential query string in the span name
175
176 return "ES %s %s" % (http_method, http_path)
177
178 def _get_hits(self, result) -> Optional[int]:
179 if getattr(result, "body", None) and "hits" in result.body: # ES >= 8
180 return result.body["hits"]["total"]["value"]
181 elif isinstance(result, dict) and "hits" in result:
182 return (
183 result["hits"]["total"]["value"]
184 if isinstance(result["hits"]["total"], dict)
185 else result["hits"]["total"]
186 )
187
[end of elasticapm/instrumentation/packages/elasticsearch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/elasticapm/instrumentation/packages/elasticsearch.py b/elasticapm/instrumentation/packages/elasticsearch.py
--- a/elasticapm/instrumentation/packages/elasticsearch.py
+++ b/elasticapm/instrumentation/packages/elasticsearch.py
@@ -177,8 +177,8 @@
def _get_hits(self, result) -> Optional[int]:
if getattr(result, "body", None) and "hits" in result.body: # ES >= 8
- return result.body["hits"]["total"]["value"]
- elif isinstance(result, dict) and "hits" in result:
+ return result.body["hits"].get("total", {}).get("value")
+ elif isinstance(result, dict) and "hits" in result and "total" in result["hits"]:
return (
result["hits"]["total"]["value"]
if isinstance(result["hits"]["total"], dict)
|
{"golden_diff": "diff --git a/elasticapm/instrumentation/packages/elasticsearch.py b/elasticapm/instrumentation/packages/elasticsearch.py\n--- a/elasticapm/instrumentation/packages/elasticsearch.py\n+++ b/elasticapm/instrumentation/packages/elasticsearch.py\n@@ -177,8 +177,8 @@\n \n def _get_hits(self, result) -> Optional[int]:\n if getattr(result, \"body\", None) and \"hits\" in result.body: # ES >= 8\n- return result.body[\"hits\"][\"total\"][\"value\"]\n- elif isinstance(result, dict) and \"hits\" in result:\n+ return result.body[\"hits\"].get(\"total\", {}).get(\"value\")\n+ elif isinstance(result, dict) and \"hits\" in result and \"total\" in result[\"hits\"]:\n return (\n result[\"hits\"][\"total\"][\"value\"]\n if isinstance(result[\"hits\"][\"total\"], dict)\n", "issue": "Elastic APM crashes when trying to inspect hits from queries while track_total_hits is False\n**Describe the bug**: \r\n\r\nThe Elastic APM instrumentation for _Elasticsearch_ tries to get total number of hits in the results of a query but if the **track_total_hits** is set to **False** the `result.body['hits']` does not have a **total** key.\r\n\r\nFails at this line exactly with a KeyError('total'): https://github.com/elastic/apm-agent-python/blob/8b22d6a910ceda6587b99e47d8b3c9fa95c8d490/elasticapm/instrumentation/packages/elasticsearch.py#L180\r\n\r\n**To Reproduce**\r\n\r\n1. Instrument your python code with ElasticAPM, you don't even need to have the APM server up it crashes while collecting the data\r\n2. Run any search query with **track_total_hits** set to **False**\r\n`client.search(params={'track_total_hits': False}, **query_body)`\r\n\r\n**Environment (please complete the following information)**\r\n- OS: Linux\r\n- Python version: 3.9\r\n- Framework and version [e.g. Django 2.1]: Flask\r\n- APM Server version: Irrelevent\r\n- Agent version: 6.13.0\r\n\r\n\r\n**Additional context**\r\n\r\nThis will crash no matter the config. We've reverted our code to use ElasticAPM 6.12 to make it work again.\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import absolute_import\n\nimport re\nfrom typing import Optional\nfrom urllib.parse import parse_qs, urlparse\n\nimport elasticapm\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import DroppedSpan, execution_context\nfrom elasticapm.utils.logging import get_logger\n\nlogger = get_logger(\"elasticapm.instrument\")\n\nshould_capture_body_re = re.compile(\"/(_search|_msearch|_count|_async_search|_sql|_eql)(/|$)\")\n\n\nclass ElasticsearchConnectionInstrumentation(AbstractInstrumentedModule):\n name = \"elasticsearch_connection\"\n\n def get_instrument_list(self):\n try:\n import elastic_transport # noqa: F401\n\n return [\n (\"elastic_transport._node._http_urllib3\", \"Urllib3HttpNode.perform_request\"),\n (\"elastic_transport._node._http_requests\", \"RequestsHttpNode.perform_request\"),\n ]\n except ImportError:\n return [\n (\"elasticsearch.connection.http_urllib3\", \"Urllib3HttpConnection.perform_request\"),\n (\"elasticsearch.connection.http_requests\", \"RequestsHttpConnection.perform_request\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n if not span or isinstance(span, DroppedSpan):\n return wrapped(*args, **kwargs)\n\n self._update_context_by_request_data(span.context, instance, args, kwargs)\n\n result = wrapped(*args, **kwargs)\n if hasattr(result, \"meta\"): # elasticsearch-py 8.x+\n status_code = result.meta.status\n cluster = result.meta.headers.get(\"x-found-handling-cluster\")\n else:\n status_code = result[0]\n cluster = result[1].get(\"x-found-handling-cluster\")\n span.context[\"http\"] = {\"status_code\": status_code}\n if cluster:\n span.context[\"db\"] = {\"instance\": cluster}\n\n return result\n\n def _update_context_by_request_data(self, context, instance, args, kwargs):\n args_len = len(args)\n url = args[1] if args_len > 1 else kwargs.get(\"url\")\n params = args[2] if args_len > 2 else kwargs.get(\"params\")\n body_serialized = args[3] if args_len > 3 else kwargs.get(\"body\")\n\n if \"?\" in url and not params:\n url, qs = url.split(\"?\", 1)\n params = {k: v[0] for k, v in parse_qs(qs).items()}\n\n should_capture_body = bool(should_capture_body_re.search(url))\n\n context[\"db\"] = {\"type\": \"elasticsearch\"}\n if should_capture_body:\n query = []\n # using both q AND body is allowed in some API endpoints / ES versions,\n # but not in others. 
We simply capture both if they are there so the\n # user can see it.\n if params and \"q\" in params:\n # 'q' may already be encoded to a byte string at this point.\n # We assume utf8, which is the default\n q = params[\"q\"]\n if isinstance(q, bytes):\n q = q.decode(\"utf-8\", errors=\"replace\")\n query.append(\"q=\" + q)\n if body_serialized:\n if isinstance(body_serialized, bytes):\n query.append(body_serialized.decode(\"utf-8\", errors=\"replace\"))\n else:\n query.append(body_serialized)\n if query:\n context[\"db\"][\"statement\"] = \"\\n\\n\".join(query)\n\n # ES5: `host` is URL, no `port` attribute\n # ES6, ES7: `host` URL, `hostname` is host, `port` is port\n # ES8: `host` is hostname, no `hostname` attribute, `port` is `port`\n if not hasattr(instance, \"port\"):\n # ES5, parse hostname and port from URL stored in `host`\n parsed_url = urlparse(instance.host)\n host = parsed_url.hostname\n port = parsed_url.port\n elif not hasattr(instance, \"hostname\"):\n # ES8 (and up, one can hope)\n host = instance.host\n port = instance.port\n else:\n # ES6, ES7\n host = instance.hostname\n port = instance.port\n\n context[\"destination\"] = {\"address\": host, \"port\": port}\n\n\nclass ElasticsearchTransportInstrumentation(AbstractInstrumentedModule):\n name = \"elasticsearch_connection\"\n\n def get_instrument_list(self):\n try:\n import elastic_transport # noqa: F401\n\n return [\n (\"elastic_transport\", \"Transport.perform_request\"),\n ]\n except ImportError:\n return [\n (\"elasticsearch.transport\", \"Transport.perform_request\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n with elasticapm.capture_span(\n self._get_signature(args, kwargs),\n span_type=\"db\",\n span_subtype=\"elasticsearch\",\n span_action=\"query\",\n extra={},\n skip_frames=2,\n leaf=True,\n ) as span:\n result_data = wrapped(*args, **kwargs)\n\n hits = self._get_hits(result_data)\n if hits:\n span.context[\"db\"][\"rows_affected\"] = hits\n\n return result_data\n\n def _get_signature(self, args, kwargs):\n args_len = len(args)\n http_method = args[0] if args_len else kwargs.get(\"method\")\n http_path = args[1] if args_len > 1 else kwargs.get(\"url\")\n http_path = http_path.split(\"?\", 1)[0] # we don't want to capture a potential query string in the span name\n\n return \"ES %s %s\" % (http_method, http_path)\n\n def _get_hits(self, result) -> Optional[int]:\n if getattr(result, \"body\", None) and \"hits\" in result.body: # ES >= 8\n return result.body[\"hits\"][\"total\"][\"value\"]\n elif isinstance(result, dict) and \"hits\" in result:\n return (\n result[\"hits\"][\"total\"][\"value\"]\n if isinstance(result[\"hits\"][\"total\"], dict)\n else result[\"hits\"][\"total\"]\n )\n", "path": "elasticapm/instrumentation/packages/elasticsearch.py"}]}
| 3,019 | 199 |
gh_patches_debug_32884
|
rasdani/github-patches
|
git_diff
|
TabbycatDebate__tabbycat-1955
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add button for direct preformed panel allocation
As an alternative to the Hungarian-based algorithm, since it is somewhat common to just want the debates and panels matched up in the same order, and to deal with conflicts afterwards.
</issue>
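A rough sketch of what such a "direct" allocator could look like, pairing debates and panels in their existing order and leaving conflicts for manual adjustment afterwards; the class name, module location, and constructor/`allocate()` interface are assumptions modelled on how `HungarianPreformedPanelAllocator` is used in the code below, not the actual implementation:

```python
# Hypothetical tabbycat/adjallocation/preformed/direct.py
class DirectPreformedPanelAllocator:
    """Pairs debates and preformed panels one-to-one in their current order."""

    def __init__(self, debates, panels, round):
        self.debates = debates
        self.panels = panels
        self.round = round

    def allocate(self):
        # No optimisation: pair the two sequences in order and let any
        # adjudicator conflicts be resolved manually afterwards.
        debates = list(self.debates)
        panels = list(self.panels)
        n = min(len(debates), len(panels))
        return debates[:n], panels[:n]
```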
<code>
[start of tabbycat/adjallocation/consumers.py]
1 import logging
2 from itertools import groupby
3 from operator import attrgetter
4
5 from django.db.models import F
6 from django.utils.safestring import mark_safe
7 from django.utils.translation import gettext as _, ngettext
8
9 from actionlog.models import ActionLogEntry
10 from breakqual.utils import calculate_live_thresholds
11 from draw.consumers import BaseAdjudicatorContainerConsumer, EditDebateOrPanelWorkerMixin
12 from participants.prefetch import populate_win_counts
13 from tournaments.models import Round
14
15 from .allocators.base import AdjudicatorAllocationError
16 from .allocators.hungarian import ConsensusHungarianAllocator, VotingHungarianAllocator
17 from .models import PreformedPanel
18 from .preformed import copy_panels_to_debates
19 from .preformed.anticipated import calculate_anticipated_draw
20 from .preformed.hungarian import HungarianPreformedPanelAllocator
21 from .serializers import (EditPanelAdjsPanelSerializer,
22 SimpleDebateAllocationSerializer, SimpleDebateImportanceSerializer,
23 SimplePanelAllocationSerializer, SimplePanelImportanceSerializer)
24
25 logger = logging.getLogger(__name__)
26
27
28 class PanelEditConsumer(BaseAdjudicatorContainerConsumer):
29 group_prefix = 'panels'
30 model = PreformedPanel
31 importance_serializer = SimplePanelImportanceSerializer
32 adjudicators_serializer = SimplePanelAllocationSerializer
33
34
35 class AdjudicatorAllocationWorkerConsumer(EditDebateOrPanelWorkerMixin):
36
37 def _apply_allocation_settings(self, round, settings):
38 t = round.tournament
39 for key, value in settings.items():
40 if key == "usePreformedPanels":
41 # Passing this here is much easier than splitting the function
42 continue # (Not actually a preference; just a toggle from Vue)
43 # No way to force front-end to only accept floats/integers :(
44 if isinstance(t.preferences[key], bool):
45 t.preferences[key] = bool(value)
46 elif isinstance(t.preferences[key], int):
47 t.preferences[key] = int(value)
48 elif isinstance(t.preferences[key], float):
49 t.preferences[key] = float(value)
50 else:
51 t.preferences[key] = value
52
53 def allocate_debate_adjs(self, event):
54 round = Round.objects.get(pk=event['extra']['round_id'])
55 self._apply_allocation_settings(round, event['extra']['settings'])
56
57 if round.draw_status == round.STATUS_RELEASED:
58 self.return_error(event['extra']['group_name'],
59 _("Draw is already released, unrelease draw to redo auto-allocations."))
60 return
61 if round.draw_status != round.STATUS_CONFIRMED:
62 self.return_error(event['extra']['group_name'],
63 _("Draw is not confirmed, confirm draw to run auto-allocations."))
64 return
65
66 if event['extra']['settings']['usePreformedPanels']:
67 if not round.preformedpanel_set.exists():
68 self.return_error(event['extra']['group_name'],
69 _("There are no preformed panels available to allocate."))
70 return
71
72 logger.info("Preformed panels exist, allocating panels to debates")
73
74 debates = round.debate_set.all()
75 panels = round.preformedpanel_set.all()
76 allocator = HungarianPreformedPanelAllocator(debates, panels, round)
77
78 debates, panels = allocator.allocate()
79 copy_panels_to_debates(debates, panels)
80
81 self.log_action(event['extra'], round, ActionLogEntry.ACTION_TYPE_PREFORMED_PANELS_DEBATES_AUTO)
82
83 msg = _("Successfully auto-allocated preformed panels to debates.")
84 level = 'success'
85
86 else:
87 logger.info("Allocating debate adjudicators using traditional allocator")
88
89 debates = round.debate_set.all()
90 adjs = round.active_adjudicators.all()
91
92 try:
93 if round.ballots_per_debate == 'per-adj':
94 allocator = VotingHungarianAllocator(debates, adjs, round)
95 else:
96 allocator = ConsensusHungarianAllocator(debates, adjs, round)
97 allocation, user_warnings = allocator.allocate()
98 except AdjudicatorAllocationError as e:
99 self.return_error(event['extra']['group_name'], str(e))
100 return
101
102 for alloc in allocation:
103 alloc.save()
104
105 self.log_action(event['extra'], round, ActionLogEntry.ACTION_TYPE_ADJUDICATORS_AUTO)
106
107 if user_warnings:
108 msg = ngettext(
109 "Successfully auto-allocated adjudicators to debates. However, there was a warning:",
110 "Successfully auto-allocated adjudicators to debates. However, there were %(count)d warnings:",
111 len(user_warnings)) % {'count': len(user_warnings)}
112 msg = "<div>" + msg + "</div><ul class=\"mt-1 mb-0\"><li>" + "</li><li>".join(user_warnings) + "</li></ul>"
113 level = 'warning'
114 else:
115 msg = _("Successfully auto-allocated adjudicators to debates.")
116 level = 'success'
117
118 # TODO: return debates directly from allocator function?
119 content = self.reserialize_debates(SimpleDebateAllocationSerializer, round)
120
121 self.return_response(content, event['extra']['group_name'], msg, level)
122
123 def allocate_panel_adjs(self, event):
124 round = Round.objects.get(pk=event['extra']['round_id'])
125 self._apply_allocation_settings(round, event['extra']['settings'])
126
127 panels = round.preformedpanel_set.all()
128
129 if not panels.exists():
130 self.return_error(event['extra']['group_name'],
131 _("There aren't any panels to fill. Create panels first."))
132 return
133
134 adjs = round.active_adjudicators.all()
135
136 try:
137 if round.ballots_per_debate == 'per-adj':
138 allocator = VotingHungarianAllocator(panels, adjs, round)
139 else:
140 allocator = ConsensusHungarianAllocator(panels, adjs, round)
141
142 allocation, user_warnings = allocator.allocate()
143 except AdjudicatorAllocationError as e:
144 self.return_error(event['extra']['group_name'], str(e))
145 return
146
147 for alloc in allocation:
148 alloc.save()
149
150 self.log_action(event['extra'], round, ActionLogEntry.ACTION_TYPE_PREFORMED_PANELS_ADJUDICATOR_AUTO)
151 content = self.reserialize_panels(SimplePanelAllocationSerializer, round)
152
153 if user_warnings:
154 msg = ngettext(
155 "Successfully auto-allocated adjudicators to preformed panels. However, there was a warning:",
156 "Successfully auto-allocated adjudicators to preformed panels. However, there were %(count)d warnings:",
157 len(user_warnings)) % {'count': len(user_warnings)}
158 msg = "<div>" + msg + "</div><ul class=\"mt-1 mb-0\"><li>" + "</li><li>".join(user_warnings) + "</li></ul>"
159 level = 'warning'
160 else:
161 msg = _("Successfully auto-allocated adjudicators to preformed panels.")
162 level = 'success'
163
164 self.return_response(content, event['extra']['group_name'], mark_safe(msg), level)
165
166 def _prioritise_by_bracket(self, instances, bracket_attrname):
167 instances = instances.order_by('-' + bracket_attrname)
168 nimportancelevels = 4
169 importance = 1
170 boundary = round(len(instances) / nimportancelevels)
171 n = 0
172 for k, group in groupby(instances, key=attrgetter(bracket_attrname)):
173 group = list(group)
174 for panel in group:
175 panel.importance = importance
176 panel.save()
177 n += len(group)
178 if n >= boundary:
179 importance -= 1
180 boundary = round((nimportancelevels - 2 - importance) * len(instances) / nimportancelevels)
181
182 def prioritise_debates(self, event):
183 # TODO: Debates and panels should really be unified in a single function
184 round = Round.objects.get(pk=event['extra']['round_id'])
185 debates = round.debate_set_with_prefetches(teams=True, adjudicators=False,
186 speakers=False, venues=False)
187
188 priority_method = event['extra']['settings']['type']
189 if priority_method == 'liveness':
190 populate_win_counts([team for debate in debates for team in debate.teams], round.prev)
191 open_category = round.tournament.breakcategory_set.filter(is_general=True).first()
192 if open_category:
193 safe, dead = calculate_live_thresholds(open_category, round.tournament, round)
194 for debate in debates:
195 points_now = [team.points_count for team in debate.teams]
196 highest = max(points_now)
197 lowest = min(points_now)
198 if lowest >= safe:
199 debate.importance = 0
200 elif highest <= dead:
201 debate.importance = -2
202 else:
203 debate.importance = 1
204 debate.save()
205 else:
206 self.return_error(event['extra']['group_name'],
207 _("You have no break category set as 'is general' so debate importances can't be calculated."))
208 return
209
210 elif priority_method == 'bracket':
211 self._prioritise_by_bracket(debates, 'bracket')
212
213 self.log_action(event['extra'], round, ActionLogEntry.ACTION_TYPE_DEBATE_IMPORTANCE_AUTO)
214 content = self.reserialize_debates(SimpleDebateImportanceSerializer, round, debates)
215 msg = _("Succesfully auto-prioritised debates.")
216 self.return_response(content, event['extra']['group_name'], msg, 'success')
217
218 def prioritise_panels(self, event):
219 rd = Round.objects.get(pk=event['extra']['round_id'])
220 panels = rd.preformedpanel_set.all()
221 priority_method = event['extra']['settings']['type']
222
223 if priority_method == 'liveness':
224 open_category = rd.tournament.breakcategory_set.filter(is_general=True).first()
225 if open_category:
226 safe, dead = calculate_live_thresholds(open_category, rd.tournament, rd)
227 for panel in panels:
228 if panel.liveness > 0:
229 panel.importance = 1
230 elif panel.bracket_min >= safe:
231 panel.importance = 0
232 else:
233 panel.importance = -2
234 panel.save()
235 else:
236 self.return_error(event['extra']['group_name'],
237 _("You have no break category set as 'is general' so panel importances can't be calculated."))
238 return
239
240 elif priority_method == 'bracket':
241 panels = panels.annotate(bracket_mid=(F('bracket_max') + F('bracket_min')) / 2)
242 self._prioritise_by_bracket(panels, 'bracket_mid')
243
244 self.log_action(event['extra'], rd, ActionLogEntry.ACTION_TYPE_PREFORMED_PANELS_IMPORTANCE_AUTO)
245 content = self.reserialize_panels(SimplePanelImportanceSerializer, rd, panels)
246 msg = _("Succesfully auto-prioritised preformed panels.")
247 self.return_response(content, event['extra']['group_name'], msg, 'success')
248
249 def create_preformed_panels(self, event):
250 round = Round.objects.get(pk=event['extra']['round_id'])
251 for i, (bracket_min, bracket_max, liveness) in enumerate(
252 calculate_anticipated_draw(round), start=1):
253 PreformedPanel.objects.update_or_create(round=round, room_rank=i,
254 defaults={
255 'bracket_max': bracket_max,
256 'bracket_min': bracket_min,
257 'liveness': liveness,
258 })
259
260 self.log_action(event['extra'], round, ActionLogEntry.ACTION_TYPE_PREFORMED_PANELS_CREATE)
261 content = self.reserialize_panels(EditPanelAdjsPanelSerializer, round)
262
263 if round.prev is None:
264 msg, level = _("Since this is the first round, the preformed panels aren't annotated "
265 "with brackets and liveness."), 'warning'
266 elif not round.prev.debate_set.exists():
267 msg, level = _("The previous round's draw doesn't exist, so preformed panels can't be "
268 "annotated with brackets and liveness."), 'warning'
269 else:
270 msg, level = _("Succesfully created new preformed panels for this round."), 'success'
271
272 self.return_response(content, event['extra']['group_name'], msg, level)
273
[end of tabbycat/adjallocation/consumers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tabbycat/adjallocation/consumers.py b/tabbycat/adjallocation/consumers.py
--- a/tabbycat/adjallocation/consumers.py
+++ b/tabbycat/adjallocation/consumers.py
@@ -17,6 +17,7 @@
from .models import PreformedPanel
from .preformed import copy_panels_to_debates
from .preformed.anticipated import calculate_anticipated_draw
+from .preformed.direct import DirectPreformedPanelAllocator
from .preformed.hungarian import HungarianPreformedPanelAllocator
from .serializers import (EditPanelAdjsPanelSerializer,
SimpleDebateAllocationSerializer, SimpleDebateImportanceSerializer,
@@ -37,7 +38,7 @@
def _apply_allocation_settings(self, round, settings):
t = round.tournament
for key, value in settings.items():
- if key == "usePreformedPanels":
+ if key in ("usePreformedPanels", "allocationMethod"):
# Passing this here is much easier than splitting the function
continue # (Not actually a preference; just a toggle from Vue)
# No way to force front-end to only accept floats/integers :(
@@ -73,7 +74,10 @@
debates = round.debate_set.all()
panels = round.preformedpanel_set.all()
- allocator = HungarianPreformedPanelAllocator(debates, panels, round)
+ if event['extra']['settings']['allocationMethod'] == 'hungarian':
+ allocator = HungarianPreformedPanelAllocator(debates, panels, round)
+ else:
+ allocator = DirectPreformedPanelAllocator(debates, panels, round)
debates, panels = allocator.allocate()
copy_panels_to_debates(debates, panels)
|
{"golden_diff": "diff --git a/tabbycat/adjallocation/consumers.py b/tabbycat/adjallocation/consumers.py\n--- a/tabbycat/adjallocation/consumers.py\n+++ b/tabbycat/adjallocation/consumers.py\n@@ -17,6 +17,7 @@\n from .models import PreformedPanel\n from .preformed import copy_panels_to_debates\n from .preformed.anticipated import calculate_anticipated_draw\n+from .preformed.direct import DirectPreformedPanelAllocator\n from .preformed.hungarian import HungarianPreformedPanelAllocator\n from .serializers import (EditPanelAdjsPanelSerializer,\n SimpleDebateAllocationSerializer, SimpleDebateImportanceSerializer,\n@@ -37,7 +38,7 @@\n def _apply_allocation_settings(self, round, settings):\n t = round.tournament\n for key, value in settings.items():\n- if key == \"usePreformedPanels\":\n+ if key in (\"usePreformedPanels\", \"allocationMethod\"):\n # Passing this here is much easier than splitting the function\n continue # (Not actually a preference; just a toggle from Vue)\n # No way to force front-end to only accept floats/integers :(\n@@ -73,7 +74,10 @@\n \n debates = round.debate_set.all()\n panels = round.preformedpanel_set.all()\n- allocator = HungarianPreformedPanelAllocator(debates, panels, round)\n+ if event['extra']['settings']['allocationMethod'] == 'hungarian':\n+ allocator = HungarianPreformedPanelAllocator(debates, panels, round)\n+ else:\n+ allocator = DirectPreformedPanelAllocator(debates, panels, round)\n \n debates, panels = allocator.allocate()\n copy_panels_to_debates(debates, panels)\n", "issue": "Add button for direct preformed panel allocation\nAs an alternative for the Hungarian-based algorithm, as it is somewhat common to just want all in the same order of the panels, and deal with conflicts afterwards.\n", "before_files": [{"content": "import logging\nfrom itertools import groupby\nfrom operator import attrgetter\n\nfrom django.db.models import F\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext as _, ngettext\n\nfrom actionlog.models import ActionLogEntry\nfrom breakqual.utils import calculate_live_thresholds\nfrom draw.consumers import BaseAdjudicatorContainerConsumer, EditDebateOrPanelWorkerMixin\nfrom participants.prefetch import populate_win_counts\nfrom tournaments.models import Round\n\nfrom .allocators.base import AdjudicatorAllocationError\nfrom .allocators.hungarian import ConsensusHungarianAllocator, VotingHungarianAllocator\nfrom .models import PreformedPanel\nfrom .preformed import copy_panels_to_debates\nfrom .preformed.anticipated import calculate_anticipated_draw\nfrom .preformed.hungarian import HungarianPreformedPanelAllocator\nfrom .serializers import (EditPanelAdjsPanelSerializer,\n SimpleDebateAllocationSerializer, SimpleDebateImportanceSerializer,\n SimplePanelAllocationSerializer, SimplePanelImportanceSerializer)\n\nlogger = logging.getLogger(__name__)\n\n\nclass PanelEditConsumer(BaseAdjudicatorContainerConsumer):\n group_prefix = 'panels'\n model = PreformedPanel\n importance_serializer = SimplePanelImportanceSerializer\n adjudicators_serializer = SimplePanelAllocationSerializer\n\n\nclass AdjudicatorAllocationWorkerConsumer(EditDebateOrPanelWorkerMixin):\n\n def _apply_allocation_settings(self, round, settings):\n t = round.tournament\n for key, value in settings.items():\n if key == \"usePreformedPanels\":\n # Passing this here is much easier than splitting the function\n continue # (Not actually a preference; just a toggle from Vue)\n # No way to force front-end to only accept floats/integers :(\n if 
isinstance(t.preferences[key], bool):\n t.preferences[key] = bool(value)\n elif isinstance(t.preferences[key], int):\n t.preferences[key] = int(value)\n elif isinstance(t.preferences[key], float):\n t.preferences[key] = float(value)\n else:\n t.preferences[key] = value\n\n def allocate_debate_adjs(self, event):\n round = Round.objects.get(pk=event['extra']['round_id'])\n self._apply_allocation_settings(round, event['extra']['settings'])\n\n if round.draw_status == round.STATUS_RELEASED:\n self.return_error(event['extra']['group_name'],\n _(\"Draw is already released, unrelease draw to redo auto-allocations.\"))\n return\n if round.draw_status != round.STATUS_CONFIRMED:\n self.return_error(event['extra']['group_name'],\n _(\"Draw is not confirmed, confirm draw to run auto-allocations.\"))\n return\n\n if event['extra']['settings']['usePreformedPanels']:\n if not round.preformedpanel_set.exists():\n self.return_error(event['extra']['group_name'],\n _(\"There are no preformed panels available to allocate.\"))\n return\n\n logger.info(\"Preformed panels exist, allocating panels to debates\")\n\n debates = round.debate_set.all()\n panels = round.preformedpanel_set.all()\n allocator = HungarianPreformedPanelAllocator(debates, panels, round)\n\n debates, panels = allocator.allocate()\n copy_panels_to_debates(debates, panels)\n\n self.log_action(event['extra'], round, ActionLogEntry.ACTION_TYPE_PREFORMED_PANELS_DEBATES_AUTO)\n\n msg = _(\"Successfully auto-allocated preformed panels to debates.\")\n level = 'success'\n\n else:\n logger.info(\"Allocating debate adjudicators using traditional allocator\")\n\n debates = round.debate_set.all()\n adjs = round.active_adjudicators.all()\n\n try:\n if round.ballots_per_debate == 'per-adj':\n allocator = VotingHungarianAllocator(debates, adjs, round)\n else:\n allocator = ConsensusHungarianAllocator(debates, adjs, round)\n allocation, user_warnings = allocator.allocate()\n except AdjudicatorAllocationError as e:\n self.return_error(event['extra']['group_name'], str(e))\n return\n\n for alloc in allocation:\n alloc.save()\n\n self.log_action(event['extra'], round, ActionLogEntry.ACTION_TYPE_ADJUDICATORS_AUTO)\n\n if user_warnings:\n msg = ngettext(\n \"Successfully auto-allocated adjudicators to debates. However, there was a warning:\",\n \"Successfully auto-allocated adjudicators to debates. However, there were %(count)d warnings:\",\n len(user_warnings)) % {'count': len(user_warnings)}\n msg = \"<div>\" + msg + \"</div><ul class=\\\"mt-1 mb-0\\\"><li>\" + \"</li><li>\".join(user_warnings) + \"</li></ul>\"\n level = 'warning'\n else:\n msg = _(\"Successfully auto-allocated adjudicators to debates.\")\n level = 'success'\n\n # TODO: return debates directly from allocator function?\n content = self.reserialize_debates(SimpleDebateAllocationSerializer, round)\n\n self.return_response(content, event['extra']['group_name'], msg, level)\n\n def allocate_panel_adjs(self, event):\n round = Round.objects.get(pk=event['extra']['round_id'])\n self._apply_allocation_settings(round, event['extra']['settings'])\n\n panels = round.preformedpanel_set.all()\n\n if not panels.exists():\n self.return_error(event['extra']['group_name'],\n _(\"There aren't any panels to fill. 
Create panels first.\"))\n return\n\n adjs = round.active_adjudicators.all()\n\n try:\n if round.ballots_per_debate == 'per-adj':\n allocator = VotingHungarianAllocator(panels, adjs, round)\n else:\n allocator = ConsensusHungarianAllocator(panels, adjs, round)\n\n allocation, user_warnings = allocator.allocate()\n except AdjudicatorAllocationError as e:\n self.return_error(event['extra']['group_name'], str(e))\n return\n\n for alloc in allocation:\n alloc.save()\n\n self.log_action(event['extra'], round, ActionLogEntry.ACTION_TYPE_PREFORMED_PANELS_ADJUDICATOR_AUTO)\n content = self.reserialize_panels(SimplePanelAllocationSerializer, round)\n\n if user_warnings:\n msg = ngettext(\n \"Successfully auto-allocated adjudicators to preformed panels. However, there was a warning:\",\n \"Successfully auto-allocated adjudicators to preformed panels. However, there were %(count)d warnings:\",\n len(user_warnings)) % {'count': len(user_warnings)}\n msg = \"<div>\" + msg + \"</div><ul class=\\\"mt-1 mb-0\\\"><li>\" + \"</li><li>\".join(user_warnings) + \"</li></ul>\"\n level = 'warning'\n else:\n msg = _(\"Successfully auto-allocated adjudicators to preformed panels.\")\n level = 'success'\n\n self.return_response(content, event['extra']['group_name'], mark_safe(msg), level)\n\n def _prioritise_by_bracket(self, instances, bracket_attrname):\n instances = instances.order_by('-' + bracket_attrname)\n nimportancelevels = 4\n importance = 1\n boundary = round(len(instances) / nimportancelevels)\n n = 0\n for k, group in groupby(instances, key=attrgetter(bracket_attrname)):\n group = list(group)\n for panel in group:\n panel.importance = importance\n panel.save()\n n += len(group)\n if n >= boundary:\n importance -= 1\n boundary = round((nimportancelevels - 2 - importance) * len(instances) / nimportancelevels)\n\n def prioritise_debates(self, event):\n # TODO: Debates and panels should really be unified in a single function\n round = Round.objects.get(pk=event['extra']['round_id'])\n debates = round.debate_set_with_prefetches(teams=True, adjudicators=False,\n speakers=False, venues=False)\n\n priority_method = event['extra']['settings']['type']\n if priority_method == 'liveness':\n populate_win_counts([team for debate in debates for team in debate.teams], round.prev)\n open_category = round.tournament.breakcategory_set.filter(is_general=True).first()\n if open_category:\n safe, dead = calculate_live_thresholds(open_category, round.tournament, round)\n for debate in debates:\n points_now = [team.points_count for team in debate.teams]\n highest = max(points_now)\n lowest = min(points_now)\n if lowest >= safe:\n debate.importance = 0\n elif highest <= dead:\n debate.importance = -2\n else:\n debate.importance = 1\n debate.save()\n else:\n self.return_error(event['extra']['group_name'],\n _(\"You have no break category set as 'is general' so debate importances can't be calculated.\"))\n return\n\n elif priority_method == 'bracket':\n self._prioritise_by_bracket(debates, 'bracket')\n\n self.log_action(event['extra'], round, ActionLogEntry.ACTION_TYPE_DEBATE_IMPORTANCE_AUTO)\n content = self.reserialize_debates(SimpleDebateImportanceSerializer, round, debates)\n msg = _(\"Succesfully auto-prioritised debates.\")\n self.return_response(content, event['extra']['group_name'], msg, 'success')\n\n def prioritise_panels(self, event):\n rd = Round.objects.get(pk=event['extra']['round_id'])\n panels = rd.preformedpanel_set.all()\n priority_method = event['extra']['settings']['type']\n\n if priority_method == 
'liveness':\n open_category = rd.tournament.breakcategory_set.filter(is_general=True).first()\n if open_category:\n safe, dead = calculate_live_thresholds(open_category, rd.tournament, rd)\n for panel in panels:\n if panel.liveness > 0:\n panel.importance = 1\n elif panel.bracket_min >= safe:\n panel.importance = 0\n else:\n panel.importance = -2\n panel.save()\n else:\n self.return_error(event['extra']['group_name'],\n _(\"You have no break category set as 'is general' so panel importances can't be calculated.\"))\n return\n\n elif priority_method == 'bracket':\n panels = panels.annotate(bracket_mid=(F('bracket_max') + F('bracket_min')) / 2)\n self._prioritise_by_bracket(panels, 'bracket_mid')\n\n self.log_action(event['extra'], rd, ActionLogEntry.ACTION_TYPE_PREFORMED_PANELS_IMPORTANCE_AUTO)\n content = self.reserialize_panels(SimplePanelImportanceSerializer, rd, panels)\n msg = _(\"Succesfully auto-prioritised preformed panels.\")\n self.return_response(content, event['extra']['group_name'], msg, 'success')\n\n def create_preformed_panels(self, event):\n round = Round.objects.get(pk=event['extra']['round_id'])\n for i, (bracket_min, bracket_max, liveness) in enumerate(\n calculate_anticipated_draw(round), start=1):\n PreformedPanel.objects.update_or_create(round=round, room_rank=i,\n defaults={\n 'bracket_max': bracket_max,\n 'bracket_min': bracket_min,\n 'liveness': liveness,\n })\n\n self.log_action(event['extra'], round, ActionLogEntry.ACTION_TYPE_PREFORMED_PANELS_CREATE)\n content = self.reserialize_panels(EditPanelAdjsPanelSerializer, round)\n\n if round.prev is None:\n msg, level = _(\"Since this is the first round, the preformed panels aren't annotated \"\n \"with brackets and liveness.\"), 'warning'\n elif not round.prev.debate_set.exists():\n msg, level = _(\"The previous round's draw doesn't exist, so preformed panels can't be \"\n \"annotated with brackets and liveness.\"), 'warning'\n else:\n msg, level = _(\"Succesfully created new preformed panels for this round.\"), 'success'\n\n self.return_response(content, event['extra']['group_name'], msg, level)\n", "path": "tabbycat/adjallocation/consumers.py"}]}
| 3,896 | 391 |
gh_patches_debug_56857
|
rasdani/github-patches
|
git_diff
|
spesmilo__electrum-1112
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hardcoded icon path in launcher for GNU/Linux systems
Hi,
currently the icon path specified in the created _.desktop_ launcher file is set to:
_Icon=/usr/share/app-install/icons/electrum.png_
(see https://github.com/spesmilo/electrum/blob/bc3013caf0d3d6a35290e9cc9e51125b7d03d14c/electrum.desktop)
This prevents icon theming without editing the launcher file.
I'd like to ask you to change the icon location as well as the icon path line in the launcher according to the freedesktop.org standards.
Please find more info here:
http://standards.freedesktop.org/icon-theme-spec/icon-theme-spec-latest.html
https://github.com/Foggalong/hardcode-fixer/wiki/What,-Why-&-How
---
According to the above resources standard icon locations are:
_/usr/share/icons/hicolor/[size]/apps/[icon name]_
_~/.local/share/icons/hicolor/[size]/apps/[icon name]_
_/usr/share/pixmaps/[icon name]_
The standard icon line in the .desktop launcher is:
_Icon=[icon name]_ (without full path, without file extension)
for instance
**Icon=electrum**
(for an icon _electrum.svg_ or _electrum.png_ stored in standard location)
Could you please move the icon to one of those locations and adjust the icon path in the .desktop file accordingly?
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/python
2
3 # python setup.py sdist --format=zip,gztar
4
5 from setuptools import setup
6 import os
7 import sys
8 import platform
9 import imp
10
11
12 version = imp.load_source('version', 'lib/version.py')
13
14 if sys.version_info[:3] < (2, 7, 0):
15 sys.exit("Error: Electrum requires Python version >= 2.7.0...")
16
17
18
19 data_files = []
20 if platform.system() == 'Linux' or platform.system() == 'FreeBSD':
21 usr_share = os.path.join(sys.prefix, "share")
22 data_files += [
23 (os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
24 (os.path.join(usr_share, 'app-install', 'icons/'), ['icons/electrum.png'])
25 ]
26
27
28 setup(
29 name="Electrum",
30 version=version.ELECTRUM_VERSION,
31 install_requires=[
32 'slowaes>=0.1a1',
33 'ecdsa>=0.9',
34 'pbkdf2',
35 'requests',
36 'pyasn1-modules',
37 'pyasn1',
38 'qrcode',
39 'protobuf',
40 'tlslite',
41 'dnspython',
42 ],
43 package_dir={
44 'electrum': 'lib',
45 'electrum_gui': 'gui',
46 'electrum_plugins': 'plugins',
47 },
48 packages=['electrum','electrum_gui','electrum_gui.qt','electrum_plugins'],
49 package_data={
50 'electrum': [
51 'wordlist/*.txt',
52 'locale/*/LC_MESSAGES/electrum.mo',
53 ],
54 'electrum_gui': [
55 "qt/themes/cleanlook/name.cfg",
56 "qt/themes/cleanlook/style.css",
57 "qt/themes/sahara/name.cfg",
58 "qt/themes/sahara/style.css",
59 "qt/themes/dark/name.cfg",
60 "qt/themes/dark/style.css",
61 ]
62 },
63 scripts=['electrum'],
64 data_files=data_files,
65 description="Lightweight Bitcoin Wallet",
66 author="Thomas Voegtlin",
67 author_email="[email protected]",
68 license="GNU GPLv3",
69 url="https://electrum.org",
70 long_description="""Lightweight Bitcoin Wallet"""
71 )
72
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@
usr_share = os.path.join(sys.prefix, "share")
data_files += [
(os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
- (os.path.join(usr_share, 'app-install', 'icons/'), ['icons/electrum.png'])
+ (os.path.join(usr_share, 'pixmaps/'), ['icons/electrum.png'])
]
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -21,7 +21,7 @@\n usr_share = os.path.join(sys.prefix, \"share\")\n data_files += [\n (os.path.join(usr_share, 'applications/'), ['electrum.desktop']),\n- (os.path.join(usr_share, 'app-install', 'icons/'), ['icons/electrum.png'])\n+ (os.path.join(usr_share, 'pixmaps/'), ['icons/electrum.png'])\n ]\n", "issue": "Hardcoded icon path in launcher for GNU/Linux systems\nHi,\n\ncurrently the icon path specified in the created _.desktop_ launcher file is set to: \n_Icon=/usr/share/app-install/icons/electrum.png_ \n\n(see https://github.com/spesmilo/electrum/blob/bc3013caf0d3d6a35290e9cc9e51125b7d03d14c/electrum.desktop)\n\nThis prevents icon theming without editing the launcher file.\n\nI'd like to ask you to change the icon location as well as the icon path line in the launcher according to the freedesktop.org standards.\n\nPlease find more info here:\nhttp://standards.freedesktop.org/icon-theme-spec/icon-theme-spec-latest.html\nhttps://github.com/Foggalong/hardcode-fixer/wiki/What,-Why-&-How\n\n---\n\nAccording to the above resources standard icon locations are:\n\n_/usr/share/icons/hicolor/[size]/apps/[icon name]_\n_~/.local/share/icons/hicolor/[size]/apps/[icon name]_\n_/usr/share/pixmaps/[icon name]_\n\nThe standard icon line in the .desktop launcher is:\n_Icon=[icon name]_ (without full path, without file extension)\n\nfor instance\n**Icon=electrum** \n(for an icon _electrum.svg_ or _electrum.png_ stored in standard location)\n\nCould you please move the icon to one of those locations and adjust the icon path in the .desktop file accordingly?\n\n", "before_files": [{"content": "#!/usr/bin/python\n\n# python setup.py sdist --format=zip,gztar\n\nfrom setuptools import setup\nimport os\nimport sys\nimport platform\nimport imp\n\n\nversion = imp.load_source('version', 'lib/version.py')\n\nif sys.version_info[:3] < (2, 7, 0):\n sys.exit(\"Error: Electrum requires Python version >= 2.7.0...\")\n\n\n\ndata_files = []\nif platform.system() == 'Linux' or platform.system() == 'FreeBSD':\n usr_share = os.path.join(sys.prefix, \"share\")\n data_files += [\n (os.path.join(usr_share, 'applications/'), ['electrum.desktop']),\n (os.path.join(usr_share, 'app-install', 'icons/'), ['icons/electrum.png'])\n ]\n\n\nsetup(\n name=\"Electrum\",\n version=version.ELECTRUM_VERSION,\n install_requires=[\n 'slowaes>=0.1a1',\n 'ecdsa>=0.9',\n 'pbkdf2',\n 'requests',\n 'pyasn1-modules',\n 'pyasn1',\n 'qrcode',\n 'protobuf',\n 'tlslite',\n 'dnspython',\n ],\n package_dir={\n 'electrum': 'lib',\n 'electrum_gui': 'gui',\n 'electrum_plugins': 'plugins',\n },\n packages=['electrum','electrum_gui','electrum_gui.qt','electrum_plugins'],\n package_data={\n 'electrum': [\n 'wordlist/*.txt',\n 'locale/*/LC_MESSAGES/electrum.mo',\n ],\n 'electrum_gui': [\n \"qt/themes/cleanlook/name.cfg\",\n \"qt/themes/cleanlook/style.css\",\n \"qt/themes/sahara/name.cfg\",\n \"qt/themes/sahara/style.css\",\n \"qt/themes/dark/name.cfg\",\n \"qt/themes/dark/style.css\",\n ]\n },\n scripts=['electrum'],\n data_files=data_files,\n description=\"Lightweight Bitcoin Wallet\",\n author=\"Thomas Voegtlin\",\n author_email=\"[email protected]\",\n license=\"GNU GPLv3\",\n url=\"https://electrum.org\",\n long_description=\"\"\"Lightweight Bitcoin Wallet\"\"\"\n)\n", "path": "setup.py"}]}
| 1,469 | 117 |
gh_patches_debug_55788
|
rasdani/github-patches
|
git_diff
|
deepset-ai__haystack-7086
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pipeline drawings in Colab have black background
**Describe the bug**
Since Haystack 2.0-beta8, Pipeline drawings in Colab and other environments (VS Code/Pycharm) randomly
have a black background.

These images are not nice and less readable than the previous ones **with transparent background**:

**To Reproduce**
Run the [first 2.0 tutorial](https://haystack.deepset.ai/tutorials/27_first_rag_pipeline)
**System:**
- Haystack version (commit or version number): 2.0-beta8
</issue>
<code>
[start of haystack/core/pipeline/draw.py]
1 # SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>
2 #
3 # SPDX-License-Identifier: Apache-2.0
4 import base64
5 import logging
6
7 import networkx # type:ignore
8 import requests
9
10 from haystack.core.errors import PipelineDrawingError
11 from haystack.core.pipeline.descriptions import find_pipeline_inputs, find_pipeline_outputs
12 from haystack.core.type_utils import _type_name
13
14 logger = logging.getLogger(__name__)
15
16
17 def _prepare_for_drawing(graph: networkx.MultiDiGraph) -> networkx.MultiDiGraph:
18 """
19 Add some extra nodes to show the inputs and outputs of the pipeline.
20 Also adds labels to edges.
21 """
22 # Label the edges
23 for inp, outp, key, data in graph.edges(keys=True, data=True):
24 data[
25 "label"
26 ] = f"{data['from_socket'].name} -> {data['to_socket'].name}{' (opt.)' if not data['mandatory'] else ''}"
27 graph.add_edge(inp, outp, key=key, **data)
28
29 # Add inputs fake node
30 graph.add_node("input")
31 for node, in_sockets in find_pipeline_inputs(graph).items():
32 for in_socket in in_sockets:
33 if not in_socket.senders and in_socket.is_mandatory:
34 # If this socket has no sender it could be a socket that receives input
35 # directly when running the Pipeline. We can't know that for sure, in doubt
36 # we draw it as receiving input directly.
37 graph.add_edge("input", node, label=in_socket.name, conn_type=_type_name(in_socket.type))
38
39 # Add outputs fake node
40 graph.add_node("output")
41 for node, out_sockets in find_pipeline_outputs(graph).items():
42 for out_socket in out_sockets:
43 graph.add_edge(node, "output", label=out_socket.name, conn_type=_type_name(out_socket.type))
44
45 return graph
46
47
48 ARROWTAIL_MANDATORY = "--"
49 ARROWTAIL_OPTIONAL = "-."
50 ARROWHEAD_MANDATORY = "-->"
51 ARROWHEAD_OPTIONAL = ".->"
52 MERMAID_STYLED_TEMPLATE = """
53 %%{{ init: {{'theme': 'neutral' }} }}%%
54
55 graph TD;
56
57 {connections}
58
59 classDef component text-align:center;
60 """
61
62
63 def _to_mermaid_image(graph: networkx.MultiDiGraph):
64 """
65 Renders a pipeline using Mermaid (hosted version at 'https://mermaid.ink'). Requires Internet access.
66 """
67 # Copy the graph to avoid modifying the original
68 graph_styled = _to_mermaid_text(graph.copy())
69
70 graphbytes = graph_styled.encode("ascii")
71 base64_bytes = base64.b64encode(graphbytes)
72 base64_string = base64_bytes.decode("ascii")
73 url = "https://mermaid.ink/img/" + base64_string
74
75 logging.debug("Rendeding graph at %s", url)
76 try:
77 resp = requests.get(url, timeout=10)
78 if resp.status_code >= 400:
79 logger.warning("Failed to draw the pipeline: https://mermaid.ink/img/ returned status %s", resp.status_code)
80 logger.info("Exact URL requested: %s", url)
81 logger.warning("No pipeline diagram will be saved.")
82 resp.raise_for_status()
83
84 except Exception as exc: # pylint: disable=broad-except
85 logger.warning("Failed to draw the pipeline: could not connect to https://mermaid.ink/img/ (%s)", exc)
86 logger.info("Exact URL requested: %s", url)
87 logger.warning("No pipeline diagram will be saved.")
88 raise PipelineDrawingError(
89 "There was an issue with https://mermaid.ink/, see the stacktrace for details."
90 ) from exc
91
92 return resp.content
93
94
95 def _to_mermaid_text(graph: networkx.MultiDiGraph) -> str:
96 """
97 Converts a Networkx graph into Mermaid syntax. The output of this function can be used in the documentation
98 with `mermaid` codeblocks and it will be automatically rendered.
99 """
100 # Copy the graph to avoid modifying the original
101 graph = _prepare_for_drawing(graph.copy())
102 sockets = {
103 comp: "".join(
104 [
105 f"<li>{name} ({_type_name(socket.type)})</li>"
106 for name, socket in data.get("input_sockets", {}).items()
107 if (not socket.is_mandatory and not socket.senders) or socket.is_variadic
108 ]
109 )
110 for comp, data in graph.nodes(data=True)
111 }
112 optional_inputs = {
113 comp: f"<br><br>Optional inputs:<ul style='text-align:left;'>{sockets}</ul>" if sockets else ""
114 for comp, sockets in sockets.items()
115 }
116
117 states = {
118 comp: f"{comp}[\"<b>{comp}</b><br><small><i>{type(data['instance']).__name__}{optional_inputs[comp]}</i></small>\"]:::component"
119 for comp, data in graph.nodes(data=True)
120 if comp not in ["input", "output"]
121 }
122
123 connections_list = []
124 for from_comp, to_comp, conn_data in graph.edges(data=True):
125 if from_comp != "input" and to_comp != "output":
126 arrowtail = ARROWTAIL_MANDATORY if conn_data["mandatory"] else ARROWTAIL_OPTIONAL
127 arrowhead = ARROWHEAD_MANDATORY if conn_data["mandatory"] else ARROWHEAD_OPTIONAL
128 label = f'"{conn_data["label"]}<br><small><i>{conn_data["conn_type"]}</i></small>"'
129 conn_string = f"{states[from_comp]} {arrowtail} {label} {arrowhead} {states[to_comp]}"
130 connections_list.append(conn_string)
131
132 input_connections = [
133 f"i{{*}} -- \"{conn_data['label']}<br><small><i>{conn_data['conn_type']}</i></small>\" --> {states[to_comp]}"
134 for _, to_comp, conn_data in graph.out_edges("input", data=True)
135 ]
136 output_connections = [
137 f"{states[from_comp]} -- \"{conn_data['label']}<br><small><i>{conn_data['conn_type']}</i></small>\"--> o{{*}}"
138 for from_comp, _, conn_data in graph.in_edges("output", data=True)
139 ]
140 connections = "\n".join(connections_list + input_connections + output_connections)
141
142 graph_styled = MERMAID_STYLED_TEMPLATE.format(connections=connections)
143 logger.debug("Mermaid diagram:\n%s", graph_styled)
144
145 return graph_styled
146
[end of haystack/core/pipeline/draw.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/haystack/core/pipeline/draw.py b/haystack/core/pipeline/draw.py
--- a/haystack/core/pipeline/draw.py
+++ b/haystack/core/pipeline/draw.py
@@ -70,7 +70,7 @@
graphbytes = graph_styled.encode("ascii")
base64_bytes = base64.b64encode(graphbytes)
base64_string = base64_bytes.decode("ascii")
- url = "https://mermaid.ink/img/" + base64_string
+ url = f"https://mermaid.ink/img/{base64_string}?type=png"
logging.debug("Rendeding graph at %s", url)
try:
|
{"golden_diff": "diff --git a/haystack/core/pipeline/draw.py b/haystack/core/pipeline/draw.py\n--- a/haystack/core/pipeline/draw.py\n+++ b/haystack/core/pipeline/draw.py\n@@ -70,7 +70,7 @@\n graphbytes = graph_styled.encode(\"ascii\")\n base64_bytes = base64.b64encode(graphbytes)\n base64_string = base64_bytes.decode(\"ascii\")\n- url = \"https://mermaid.ink/img/\" + base64_string\n+ url = f\"https://mermaid.ink/img/{base64_string}?type=png\"\n \n logging.debug(\"Rendeding graph at %s\", url)\n try:\n", "issue": "Pipeline drawings in Colab have black background\n**Describe the bug**\r\nSince Haystack 2.0-beta8, Pipeline drawings in Colab and other environments (VS Code/Pycharm) randomly\r\nhave a black background.\r\n\r\n\r\n\r\nThese images are not nice and less readable than the previous ones **with transparent background**:\r\n\r\n\r\n\r\n**To Reproduce**\r\nRun the [first 2.0 tutorial](https://haystack.deepset.ai/tutorials/27_first_rag_pipeline)\r\n\r\n\r\n**System:**\r\n - Haystack version (commit or version number): 2.0-beta8\r\n\n", "before_files": [{"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# SPDX-License-Identifier: Apache-2.0\nimport base64\nimport logging\n\nimport networkx # type:ignore\nimport requests\n\nfrom haystack.core.errors import PipelineDrawingError\nfrom haystack.core.pipeline.descriptions import find_pipeline_inputs, find_pipeline_outputs\nfrom haystack.core.type_utils import _type_name\n\nlogger = logging.getLogger(__name__)\n\n\ndef _prepare_for_drawing(graph: networkx.MultiDiGraph) -> networkx.MultiDiGraph:\n \"\"\"\n Add some extra nodes to show the inputs and outputs of the pipeline.\n Also adds labels to edges.\n \"\"\"\n # Label the edges\n for inp, outp, key, data in graph.edges(keys=True, data=True):\n data[\n \"label\"\n ] = f\"{data['from_socket'].name} -> {data['to_socket'].name}{' (opt.)' if not data['mandatory'] else ''}\"\n graph.add_edge(inp, outp, key=key, **data)\n\n # Add inputs fake node\n graph.add_node(\"input\")\n for node, in_sockets in find_pipeline_inputs(graph).items():\n for in_socket in in_sockets:\n if not in_socket.senders and in_socket.is_mandatory:\n # If this socket has no sender it could be a socket that receives input\n # directly when running the Pipeline. We can't know that for sure, in doubt\n # we draw it as receiving input directly.\n graph.add_edge(\"input\", node, label=in_socket.name, conn_type=_type_name(in_socket.type))\n\n # Add outputs fake node\n graph.add_node(\"output\")\n for node, out_sockets in find_pipeline_outputs(graph).items():\n for out_socket in out_sockets:\n graph.add_edge(node, \"output\", label=out_socket.name, conn_type=_type_name(out_socket.type))\n\n return graph\n\n\nARROWTAIL_MANDATORY = \"--\"\nARROWTAIL_OPTIONAL = \"-.\"\nARROWHEAD_MANDATORY = \"-->\"\nARROWHEAD_OPTIONAL = \".->\"\nMERMAID_STYLED_TEMPLATE = \"\"\"\n%%{{ init: {{'theme': 'neutral' }} }}%%\n\ngraph TD;\n\n{connections}\n\nclassDef component text-align:center;\n\"\"\"\n\n\ndef _to_mermaid_image(graph: networkx.MultiDiGraph):\n \"\"\"\n Renders a pipeline using Mermaid (hosted version at 'https://mermaid.ink'). 
Requires Internet access.\n \"\"\"\n # Copy the graph to avoid modifying the original\n graph_styled = _to_mermaid_text(graph.copy())\n\n graphbytes = graph_styled.encode(\"ascii\")\n base64_bytes = base64.b64encode(graphbytes)\n base64_string = base64_bytes.decode(\"ascii\")\n url = \"https://mermaid.ink/img/\" + base64_string\n\n logging.debug(\"Rendeding graph at %s\", url)\n try:\n resp = requests.get(url, timeout=10)\n if resp.status_code >= 400:\n logger.warning(\"Failed to draw the pipeline: https://mermaid.ink/img/ returned status %s\", resp.status_code)\n logger.info(\"Exact URL requested: %s\", url)\n logger.warning(\"No pipeline diagram will be saved.\")\n resp.raise_for_status()\n\n except Exception as exc: # pylint: disable=broad-except\n logger.warning(\"Failed to draw the pipeline: could not connect to https://mermaid.ink/img/ (%s)\", exc)\n logger.info(\"Exact URL requested: %s\", url)\n logger.warning(\"No pipeline diagram will be saved.\")\n raise PipelineDrawingError(\n \"There was an issue with https://mermaid.ink/, see the stacktrace for details.\"\n ) from exc\n\n return resp.content\n\n\ndef _to_mermaid_text(graph: networkx.MultiDiGraph) -> str:\n \"\"\"\n Converts a Networkx graph into Mermaid syntax. The output of this function can be used in the documentation\n with `mermaid` codeblocks and it will be automatically rendered.\n \"\"\"\n # Copy the graph to avoid modifying the original\n graph = _prepare_for_drawing(graph.copy())\n sockets = {\n comp: \"\".join(\n [\n f\"<li>{name} ({_type_name(socket.type)})</li>\"\n for name, socket in data.get(\"input_sockets\", {}).items()\n if (not socket.is_mandatory and not socket.senders) or socket.is_variadic\n ]\n )\n for comp, data in graph.nodes(data=True)\n }\n optional_inputs = {\n comp: f\"<br><br>Optional inputs:<ul style='text-align:left;'>{sockets}</ul>\" if sockets else \"\"\n for comp, sockets in sockets.items()\n }\n\n states = {\n comp: f\"{comp}[\\\"<b>{comp}</b><br><small><i>{type(data['instance']).__name__}{optional_inputs[comp]}</i></small>\\\"]:::component\"\n for comp, data in graph.nodes(data=True)\n if comp not in [\"input\", \"output\"]\n }\n\n connections_list = []\n for from_comp, to_comp, conn_data in graph.edges(data=True):\n if from_comp != \"input\" and to_comp != \"output\":\n arrowtail = ARROWTAIL_MANDATORY if conn_data[\"mandatory\"] else ARROWTAIL_OPTIONAL\n arrowhead = ARROWHEAD_MANDATORY if conn_data[\"mandatory\"] else ARROWHEAD_OPTIONAL\n label = f'\"{conn_data[\"label\"]}<br><small><i>{conn_data[\"conn_type\"]}</i></small>\"'\n conn_string = f\"{states[from_comp]} {arrowtail} {label} {arrowhead} {states[to_comp]}\"\n connections_list.append(conn_string)\n\n input_connections = [\n f\"i{{*}} -- \\\"{conn_data['label']}<br><small><i>{conn_data['conn_type']}</i></small>\\\" --> {states[to_comp]}\"\n for _, to_comp, conn_data in graph.out_edges(\"input\", data=True)\n ]\n output_connections = [\n f\"{states[from_comp]} -- \\\"{conn_data['label']}<br><small><i>{conn_data['conn_type']}</i></small>\\\"--> o{{*}}\"\n for from_comp, _, conn_data in graph.in_edges(\"output\", data=True)\n ]\n connections = \"\\n\".join(connections_list + input_connections + output_connections)\n\n graph_styled = MERMAID_STYLED_TEMPLATE.format(connections=connections)\n logger.debug(\"Mermaid diagram:\\n%s\", graph_styled)\n\n return graph_styled\n", "path": "haystack/core/pipeline/draw.py"}]}
| 2,547 | 160 |
gh_patches_debug_26343
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-2165
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Money formatting should start off with correct decimal places
By default, money columns should have 2 decimal places in the display options.
Currently, when you import the money in the demo video, there are values like `$15.6` – by default, that should be `$15.60`.
</issue>
<code>
[start of mathesar/api/serializers/columns.py]
1 from rest_framework import serializers, status
2 from rest_framework.exceptions import ValidationError
3 from rest_framework.fields import empty, SerializerMethodField
4 from rest_framework.settings import api_settings
5
6 from db.columns.exceptions import InvalidTypeError
7 from db.columns.exceptions import InvalidTypeOptionError
8 from db.types.base import PostgresType
9 from db.types.operations.convert import get_db_type_enum_from_id
10 from mathesar.api.exceptions.database_exceptions import (
11 exceptions as database_api_exceptions
12 )
13 from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
14 from mathesar.api.serializers.shared_serializers import (
15 DisplayOptionsMappingSerializer,
16 DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY,
17 )
18 from mathesar.models.base import Column
19
20
21 class InputValueField(serializers.CharField):
22 """
23 Takes in an arbitrary value. Emulates the record creation endpoint,
24 which takes in arbitrary values (un-validated and un-processed request.data).
25 This field replicates that behavior in a serializer.
26 """
27
28 def to_internal_value(self, data):
29 return data
30
31 def to_representation(self, value):
32 return value
33
34
35 class TypeOptionSerializer(MathesarErrorMessageMixin, serializers.Serializer):
36 length = serializers.IntegerField(required=False)
37 precision = serializers.IntegerField(required=False)
38 scale = serializers.IntegerField(required=False)
39 fields = serializers.CharField(required=False)
40
41 def validate(self, attrs):
42 db_type = self.context.get('db_type', None)
43 scale = attrs.get('scale', None)
44 precision = attrs.get('precision', None)
45 if (
46 db_type == PostgresType.NUMERIC
47 and (scale is None) != (precision is None)
48 ):
49 raise database_api_exceptions.InvalidTypeOptionAPIException(
50 InvalidTypeOptionError,
51 message='Both scale and precision fields are required.',
52 status_code=status.HTTP_400_BAD_REQUEST,
53 )
54 return super().validate(attrs)
55
56 def run_validation(self, data=empty):
57 # Ensure that there are no unknown type options passed in.
58 if data is not empty and data is not None:
59 unknown = set(data) - set(self.fields)
60 if unknown:
61 errors = ['Unknown field: {}'.format(field) for field in unknown]
62 raise serializers.ValidationError({
63 api_settings.NON_FIELD_ERRORS_KEY: errors,
64 })
65
66 return super(TypeOptionSerializer, self).run_validation(data)
67
68
69 TYPE_KEY = 'type'
70 DISPLAY_OPTIONS_KEY = 'display_options'
71
72
73 class SimpleColumnSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):
74 class Meta:
75 model = Column
76 fields = ('id',
77 'name',
78 TYPE_KEY,
79 'type_options',
80 DISPLAY_OPTIONS_KEY,
81 )
82 id = serializers.IntegerField(required=False)
83 name = serializers.CharField()
84 # TODO consider renaming type and type_options to db_type and db_type_options
85 # The name of below attribute should match value of TYPE_KEY
86 type = serializers.CharField()
87 type_options = TypeOptionSerializer(required=False, allow_null=True)
88 # The name of below attribute should match value of DISPLAY_OPTIONS_KEY
89 display_options = DisplayOptionsMappingSerializer(required=False, allow_null=True)
90
91 def to_representation(self, instance):
92 if isinstance(instance, dict):
93 db_type_id = instance.get(TYPE_KEY)
94 db_type = get_db_type_enum_from_id(db_type_id)
95 else:
96 db_type = instance.db_type
97 # TODO replace or remove this assert before production
98 assert db_type is not None
99 self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = db_type
100 representation = super().to_representation(instance)
101 _force_canonical_type(representation, db_type)
102 return representation
103
104 def to_internal_value(self, data):
105 if self.partial and TYPE_KEY not in data:
106 db_type = getattr(self.instance, 'db_type', None)
107 else:
108 db_type_id = data.get(TYPE_KEY, None)
109 db_type = get_db_type_enum_from_id(db_type_id) if db_type_id else None
110 self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = db_type
111 return super().to_internal_value(data)
112
113
114 def _force_canonical_type(representation, db_type):
115 """
116 Sometimes the representation's TYPE_KEY attribute will also include type option information
117 (e.g. `numeric(3, 5)`). We override the attribute's value to a canonical type id.
118
119 This might be better solved upstream, but since our Column model subclasses SA's Column,
120 overriding its TYPE_KEY attribute, might interfere with SA's workings.
121 """
122 representation[TYPE_KEY] = db_type.id
123 return representation
124
125
126 class ColumnDefaultSerializer(MathesarErrorMessageMixin, serializers.Serializer):
127 value = InputValueField()
128 is_dynamic = serializers.BooleanField(read_only=True)
129
130
131 class ColumnSerializer(SimpleColumnSerializer):
132 class Meta(SimpleColumnSerializer.Meta):
133 fields = SimpleColumnSerializer.Meta.fields + (
134 'nullable',
135 'primary_key',
136 'source_column',
137 'copy_source_data',
138 'copy_source_constraints',
139 'valid_target_types',
140 'default',
141 'has_dependents',
142 )
143 model_fields = (DISPLAY_OPTIONS_KEY,)
144
145 name = serializers.CharField(required=False, allow_blank=True)
146
147 # From scratch fields
148 type = serializers.CharField(required=False)
149 nullable = serializers.BooleanField(default=True)
150 primary_key = serializers.BooleanField(default=False)
151 default = ColumnDefaultSerializer(
152 source='column_default_dict', required=False, allow_null=True, default=None
153 )
154
155 # From duplication fields
156 source_column = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), required=False, write_only=True)
157 copy_source_data = serializers.BooleanField(default=True, write_only=True)
158 copy_source_constraints = serializers.BooleanField(default=True, write_only=True)
159
160 # Read only fields
161 valid_target_types = SerializerMethodField(method_name='get_valid_target_types', read_only=True)
162
163 def validate(self, data):
164 data = super().validate(data)
165 # Reevaluate column display options based on the new column type.
166 if TYPE_KEY in data and self.instance:
167 db_type = get_db_type_enum_from_id(data[TYPE_KEY].lower())
168 target_types = self.instance.valid_target_types
169 if db_type not in target_types:
170 raise database_api_exceptions.InvalidTypeCastAPIException(
171 InvalidTypeError,
172 status_code=status.HTTP_400_BAD_REQUEST
173 )
174 if DISPLAY_OPTIONS_KEY not in data:
175 db_type = getattr(self.instance, 'db_type', None)
176 # Invalidate display_options if type has been changed
177 if db_type is not None:
178 if str(db_type.id) != data[TYPE_KEY]:
179 data[DISPLAY_OPTIONS_KEY] = None
180 if not self.partial:
181 from_scratch_required_fields = [TYPE_KEY]
182 from_scratch_specific_fields = [TYPE_KEY, 'nullable', 'primary_key']
183 from_dupe_required_fields = ['source_column']
184 from_dupe_specific_fields = ['source_column', 'copy_source_data',
185 'copy_source_constraints']
186
187 # Note that we run validation on self.initial_data, as `data` has defaults
188 # filled in for fields that weren't specified by the request
189 from_scratch_required_all = all([
190 f in self.initial_data for f in from_scratch_required_fields
191 ])
192 from_scratch_specific_in = [
193 f for f in from_scratch_specific_fields if f in self.initial_data
194 ]
195 from_dupe_required_all = all([
196 f in self.initial_data for f in from_dupe_required_fields
197 ])
198 from_dupe_specific_in = [
199 f for f in from_dupe_specific_fields if f in self.initial_data
200 ]
201
202 if len(from_dupe_specific_in) and len(from_scratch_specific_in):
203 raise ValidationError(
204 f'{from_scratch_specific_in} cannot be passed in if '
205 f'{from_dupe_specific_in} has also been passed in.'
206 )
207 elif not from_dupe_required_all and not from_scratch_required_all:
208 # We default to from scratch required fields if no fields are passed
209 if len(from_dupe_specific_in) and not len(from_scratch_specific_in):
210 required_fields = from_dupe_required_fields
211 else:
212 required_fields = from_scratch_required_fields
213 raise ValidationError({
214 f: ['This field is required.']
215 for f in required_fields
216 if f not in self.initial_data
217 })
218 return data
219
220 @property
221 def validated_model_fields(self):
222 return {key: self.validated_data[key] for key in self.validated_data if key in self.Meta.model_fields}
223
224 def get_valid_target_types(self, column):
225 valid_target_types = column.valid_target_types
226 if valid_target_types:
227 valid_target_type_ids = tuple(
228 db_type.id for db_type in valid_target_types
229 )
230 return valid_target_type_ids
231
[end of mathesar/api/serializers/columns.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mathesar/api/serializers/columns.py b/mathesar/api/serializers/columns.py
--- a/mathesar/api/serializers/columns.py
+++ b/mathesar/api/serializers/columns.py
@@ -5,7 +5,7 @@
from db.columns.exceptions import InvalidTypeError
from db.columns.exceptions import InvalidTypeOptionError
-from db.types.base import PostgresType
+from db.types.base import PostgresType, MathesarCustomType
from db.types.operations.convert import get_db_type_enum_from_id
from mathesar.api.exceptions.database_exceptions import (
exceptions as database_api_exceptions
@@ -217,6 +217,21 @@
})
return data
+ def to_representation(self, instance):
+ # Set default display_options for mathesar_money type if none are provided.
+ if (
+ instance.db_type == MathesarCustomType.MATHESAR_MONEY
+ and instance.display_options is None
+ ):
+ instance.display_options = {
+ 'use_grouping': 'true',
+ 'number_format': None,
+ 'currency_symbol': None,
+ 'maximum_fraction_digits': 2,
+ 'minimum_fraction_digits': 2,
+ 'currency_symbol_location': 'after-minus'}
+ return super().to_representation(instance)
+
@property
def validated_model_fields(self):
return {key: self.validated_data[key] for key in self.validated_data if key in self.Meta.model_fields}
|
{"golden_diff": "diff --git a/mathesar/api/serializers/columns.py b/mathesar/api/serializers/columns.py\n--- a/mathesar/api/serializers/columns.py\n+++ b/mathesar/api/serializers/columns.py\n@@ -5,7 +5,7 @@\n \n from db.columns.exceptions import InvalidTypeError\n from db.columns.exceptions import InvalidTypeOptionError\n-from db.types.base import PostgresType\n+from db.types.base import PostgresType, MathesarCustomType\n from db.types.operations.convert import get_db_type_enum_from_id\n from mathesar.api.exceptions.database_exceptions import (\n exceptions as database_api_exceptions\n@@ -217,6 +217,21 @@\n })\n return data\n \n+ def to_representation(self, instance):\n+ # Set default display_options for mathesar_money type if none are provided.\n+ if (\n+ instance.db_type == MathesarCustomType.MATHESAR_MONEY\n+ and instance.display_options is None\n+ ):\n+ instance.display_options = {\n+ 'use_grouping': 'true',\n+ 'number_format': None,\n+ 'currency_symbol': None,\n+ 'maximum_fraction_digits': 2,\n+ 'minimum_fraction_digits': 2,\n+ 'currency_symbol_location': 'after-minus'}\n+ return super().to_representation(instance)\n+\n @property\n def validated_model_fields(self):\n return {key: self.validated_data[key] for key in self.validated_data if key in self.Meta.model_fields}\n", "issue": "Money formatting should start off with correct decimal places\nBy default, money columns should have 2 decimal places in the display options.\r\n\r\nCurrently, when you import the money in the demo video, there are values like `$15.6` \u2013 by default, that should be `$15.60`.\n", "before_files": [{"content": "from rest_framework import serializers, status\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import empty, SerializerMethodField\nfrom rest_framework.settings import api_settings\n\nfrom db.columns.exceptions import InvalidTypeError\nfrom db.columns.exceptions import InvalidTypeOptionError\nfrom db.types.base import PostgresType\nfrom db.types.operations.convert import get_db_type_enum_from_id\nfrom mathesar.api.exceptions.database_exceptions import (\n exceptions as database_api_exceptions\n)\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.api.serializers.shared_serializers import (\n DisplayOptionsMappingSerializer,\n DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY,\n)\nfrom mathesar.models.base import Column\n\n\nclass InputValueField(serializers.CharField):\n \"\"\"\n Takes in an arbitrary value. 
Emulates the record creation endpoint,\n which takes in arbitrary values (un-validated and un-processed request.data).\n This field replicates that behavior in a serializer.\n \"\"\"\n\n def to_internal_value(self, data):\n return data\n\n def to_representation(self, value):\n return value\n\n\nclass TypeOptionSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n length = serializers.IntegerField(required=False)\n precision = serializers.IntegerField(required=False)\n scale = serializers.IntegerField(required=False)\n fields = serializers.CharField(required=False)\n\n def validate(self, attrs):\n db_type = self.context.get('db_type', None)\n scale = attrs.get('scale', None)\n precision = attrs.get('precision', None)\n if (\n db_type == PostgresType.NUMERIC\n and (scale is None) != (precision is None)\n ):\n raise database_api_exceptions.InvalidTypeOptionAPIException(\n InvalidTypeOptionError,\n message='Both scale and precision fields are required.',\n status_code=status.HTTP_400_BAD_REQUEST,\n )\n return super().validate(attrs)\n\n def run_validation(self, data=empty):\n # Ensure that there are no unknown type options passed in.\n if data is not empty and data is not None:\n unknown = set(data) - set(self.fields)\n if unknown:\n errors = ['Unknown field: {}'.format(field) for field in unknown]\n raise serializers.ValidationError({\n api_settings.NON_FIELD_ERRORS_KEY: errors,\n })\n\n return super(TypeOptionSerializer, self).run_validation(data)\n\n\nTYPE_KEY = 'type'\nDISPLAY_OPTIONS_KEY = 'display_options'\n\n\nclass SimpleColumnSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = Column\n fields = ('id',\n 'name',\n TYPE_KEY,\n 'type_options',\n DISPLAY_OPTIONS_KEY,\n )\n id = serializers.IntegerField(required=False)\n name = serializers.CharField()\n # TODO consider renaming type and type_options to db_type and db_type_options\n # The name of below attribute should match value of TYPE_KEY\n type = serializers.CharField()\n type_options = TypeOptionSerializer(required=False, allow_null=True)\n # The name of below attribute should match value of DISPLAY_OPTIONS_KEY\n display_options = DisplayOptionsMappingSerializer(required=False, allow_null=True)\n\n def to_representation(self, instance):\n if isinstance(instance, dict):\n db_type_id = instance.get(TYPE_KEY)\n db_type = get_db_type_enum_from_id(db_type_id)\n else:\n db_type = instance.db_type\n # TODO replace or remove this assert before production\n assert db_type is not None\n self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = db_type\n representation = super().to_representation(instance)\n _force_canonical_type(representation, db_type)\n return representation\n\n def to_internal_value(self, data):\n if self.partial and TYPE_KEY not in data:\n db_type = getattr(self.instance, 'db_type', None)\n else:\n db_type_id = data.get(TYPE_KEY, None)\n db_type = get_db_type_enum_from_id(db_type_id) if db_type_id else None\n self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = db_type\n return super().to_internal_value(data)\n\n\ndef _force_canonical_type(representation, db_type):\n \"\"\"\n Sometimes the representation's TYPE_KEY attribute will also include type option information\n (e.g. `numeric(3, 5)`). 
We override the attribute's value to a canonical type id.\n\n This might be better solved upstream, but since our Column model subclasses SA's Column,\n overriding its TYPE_KEY attribute, might interfere with SA's workings.\n \"\"\"\n representation[TYPE_KEY] = db_type.id\n return representation\n\n\nclass ColumnDefaultSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n value = InputValueField()\n is_dynamic = serializers.BooleanField(read_only=True)\n\n\nclass ColumnSerializer(SimpleColumnSerializer):\n class Meta(SimpleColumnSerializer.Meta):\n fields = SimpleColumnSerializer.Meta.fields + (\n 'nullable',\n 'primary_key',\n 'source_column',\n 'copy_source_data',\n 'copy_source_constraints',\n 'valid_target_types',\n 'default',\n 'has_dependents',\n )\n model_fields = (DISPLAY_OPTIONS_KEY,)\n\n name = serializers.CharField(required=False, allow_blank=True)\n\n # From scratch fields\n type = serializers.CharField(required=False)\n nullable = serializers.BooleanField(default=True)\n primary_key = serializers.BooleanField(default=False)\n default = ColumnDefaultSerializer(\n source='column_default_dict', required=False, allow_null=True, default=None\n )\n\n # From duplication fields\n source_column = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), required=False, write_only=True)\n copy_source_data = serializers.BooleanField(default=True, write_only=True)\n copy_source_constraints = serializers.BooleanField(default=True, write_only=True)\n\n # Read only fields\n valid_target_types = SerializerMethodField(method_name='get_valid_target_types', read_only=True)\n\n def validate(self, data):\n data = super().validate(data)\n # Reevaluate column display options based on the new column type.\n if TYPE_KEY in data and self.instance:\n db_type = get_db_type_enum_from_id(data[TYPE_KEY].lower())\n target_types = self.instance.valid_target_types\n if db_type not in target_types:\n raise database_api_exceptions.InvalidTypeCastAPIException(\n InvalidTypeError,\n status_code=status.HTTP_400_BAD_REQUEST\n )\n if DISPLAY_OPTIONS_KEY not in data:\n db_type = getattr(self.instance, 'db_type', None)\n # Invalidate display_options if type has been changed\n if db_type is not None:\n if str(db_type.id) != data[TYPE_KEY]:\n data[DISPLAY_OPTIONS_KEY] = None\n if not self.partial:\n from_scratch_required_fields = [TYPE_KEY]\n from_scratch_specific_fields = [TYPE_KEY, 'nullable', 'primary_key']\n from_dupe_required_fields = ['source_column']\n from_dupe_specific_fields = ['source_column', 'copy_source_data',\n 'copy_source_constraints']\n\n # Note that we run validation on self.initial_data, as `data` has defaults\n # filled in for fields that weren't specified by the request\n from_scratch_required_all = all([\n f in self.initial_data for f in from_scratch_required_fields\n ])\n from_scratch_specific_in = [\n f for f in from_scratch_specific_fields if f in self.initial_data\n ]\n from_dupe_required_all = all([\n f in self.initial_data for f in from_dupe_required_fields\n ])\n from_dupe_specific_in = [\n f for f in from_dupe_specific_fields if f in self.initial_data\n ]\n\n if len(from_dupe_specific_in) and len(from_scratch_specific_in):\n raise ValidationError(\n f'{from_scratch_specific_in} cannot be passed in if '\n f'{from_dupe_specific_in} has also been passed in.'\n )\n elif not from_dupe_required_all and not from_scratch_required_all:\n # We default to from scratch required fields if no fields are passed\n if len(from_dupe_specific_in) and not 
len(from_scratch_specific_in):\n required_fields = from_dupe_required_fields\n else:\n required_fields = from_scratch_required_fields\n raise ValidationError({\n f: ['This field is required.']\n for f in required_fields\n if f not in self.initial_data\n })\n return data\n\n @property\n def validated_model_fields(self):\n return {key: self.validated_data[key] for key in self.validated_data if key in self.Meta.model_fields}\n\n def get_valid_target_types(self, column):\n valid_target_types = column.valid_target_types\n if valid_target_types:\n valid_target_type_ids = tuple(\n db_type.id for db_type in valid_target_types\n )\n return valid_target_type_ids\n", "path": "mathesar/api/serializers/columns.py"}]}
| 3,052 | 321 |
gh_patches_debug_2600
|
rasdani/github-patches
|
git_diff
|
dmlc__dgl-2505
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
jtnn example error
NOCUDA=1 python3 vaetrain_dgl.py
it shows NameError: name 'tensor' is not defined in dgl/examples/pytorch/jtnn/jtnn/nnutils.py", line 11, in cuda
return tensor
env:
dgl 0.5.3
torch 1.7.1
mac os
</issue>
<code>
[start of examples/pytorch/jtnn/jtnn/nnutils.py]
1 import torch
2 import torch.nn as nn
3 import os
4 import dgl
5
6
7 def cuda(x):
8 if torch.cuda.is_available() and not os.getenv('NOCUDA', None):
9 return x.to(torch.device('cuda')) # works for both DGLGraph and tensor
10 else:
11 return tensor
12
13
14 class GRUUpdate(nn.Module):
15 def __init__(self, hidden_size):
16 nn.Module.__init__(self)
17 self.hidden_size = hidden_size
18
19 self.W_z = nn.Linear(2 * hidden_size, hidden_size)
20 self.W_r = nn.Linear(hidden_size, hidden_size, bias=False)
21 self.U_r = nn.Linear(hidden_size, hidden_size)
22 self.W_h = nn.Linear(2 * hidden_size, hidden_size)
23
24 def update_zm(self, node):
25 src_x = node.data['src_x']
26 s = node.data['s']
27 rm = node.data['accum_rm']
28 z = torch.sigmoid(self.W_z(torch.cat([src_x, s], 1)))
29 m = torch.tanh(self.W_h(torch.cat([src_x, rm], 1)))
30 m = (1 - z) * s + z * m
31 return {'m': m, 'z': z}
32
33 def update_r(self, node, zm=None):
34 dst_x = node.data['dst_x']
35 m = node.data['m'] if zm is None else zm['m']
36 r_1 = self.W_r(dst_x)
37 r_2 = self.U_r(m)
38 r = torch.sigmoid(r_1 + r_2)
39 return {'r': r, 'rm': r * m}
40
41 def forward(self, node):
42 dic = self.update_zm(node)
43 dic.update(self.update_r(node, zm=dic))
44 return dic
45
46 def tocpu(g):
47 src, dst = g.edges()
48 src = src.cpu()
49 dst = dst.cpu()
50 return dgl.graph((src, dst), num_nodes=g.number_of_nodes())
51
[end of examples/pytorch/jtnn/jtnn/nnutils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/pytorch/jtnn/jtnn/nnutils.py b/examples/pytorch/jtnn/jtnn/nnutils.py
--- a/examples/pytorch/jtnn/jtnn/nnutils.py
+++ b/examples/pytorch/jtnn/jtnn/nnutils.py
@@ -8,7 +8,7 @@
if torch.cuda.is_available() and not os.getenv('NOCUDA', None):
return x.to(torch.device('cuda')) # works for both DGLGraph and tensor
else:
- return tensor
+ return x
class GRUUpdate(nn.Module):
|
{"golden_diff": "diff --git a/examples/pytorch/jtnn/jtnn/nnutils.py b/examples/pytorch/jtnn/jtnn/nnutils.py\n--- a/examples/pytorch/jtnn/jtnn/nnutils.py\n+++ b/examples/pytorch/jtnn/jtnn/nnutils.py\n@@ -8,7 +8,7 @@\n if torch.cuda.is_available() and not os.getenv('NOCUDA', None):\n return x.to(torch.device('cuda')) # works for both DGLGraph and tensor\n else:\n- return tensor\n+ return x\n \n \n class GRUUpdate(nn.Module):\n", "issue": "jtnn example error\nNOCUDA=1 python3 vaetrain_dgl.py\r\nit shows NameError: name 'tensor' is not defined in dgl/examples/pytorch/jtnn/jtnn/nnutils.py\", line 11, in cuda\r\n return tensor\r\n\r\nenv: \r\ndgl 0.5.3\r\ntorch 1.7.1\r\nmac os\r\n\r\n\r\n\n", "before_files": [{"content": "import torch\nimport torch.nn as nn\nimport os\nimport dgl\n\n\ndef cuda(x):\n if torch.cuda.is_available() and not os.getenv('NOCUDA', None):\n return x.to(torch.device('cuda')) # works for both DGLGraph and tensor\n else:\n return tensor\n\n\nclass GRUUpdate(nn.Module):\n def __init__(self, hidden_size):\n nn.Module.__init__(self)\n self.hidden_size = hidden_size\n\n self.W_z = nn.Linear(2 * hidden_size, hidden_size)\n self.W_r = nn.Linear(hidden_size, hidden_size, bias=False)\n self.U_r = nn.Linear(hidden_size, hidden_size)\n self.W_h = nn.Linear(2 * hidden_size, hidden_size)\n\n def update_zm(self, node):\n src_x = node.data['src_x']\n s = node.data['s']\n rm = node.data['accum_rm']\n z = torch.sigmoid(self.W_z(torch.cat([src_x, s], 1)))\n m = torch.tanh(self.W_h(torch.cat([src_x, rm], 1)))\n m = (1 - z) * s + z * m\n return {'m': m, 'z': z}\n\n def update_r(self, node, zm=None):\n dst_x = node.data['dst_x']\n m = node.data['m'] if zm is None else zm['m']\n r_1 = self.W_r(dst_x)\n r_2 = self.U_r(m)\n r = torch.sigmoid(r_1 + r_2)\n return {'r': r, 'rm': r * m}\n\n def forward(self, node):\n dic = self.update_zm(node)\n dic.update(self.update_r(node, zm=dic))\n return dic\n\ndef tocpu(g):\n src, dst = g.edges()\n src = src.cpu()\n dst = dst.cpu()\n return dgl.graph((src, dst), num_nodes=g.number_of_nodes())\n", "path": "examples/pytorch/jtnn/jtnn/nnutils.py"}]}
| 1,159 | 133 |
gh_patches_debug_178
|
rasdani/github-patches
|
git_diff
|
napalm-automation__napalm-904
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`get_lldp_neighbors_detail()` fails on Arista 7150S
```python
In [1]: from napalm.eos import EOSDriver
In [2]: from getpass import getpass
In [3]: with EOSDriver("arista", "bewing", getpass()) as d:
...: print(d.get_lldp_neighbors_detail())
...:
Password:
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-3-85f875e30fe3> in <module>
1 with EOSDriver("arista", "bewing", getpass()) as d:
----> 2 print(d.get_lldp_neighbors_detail())
3
/mnt/c/Users/bewing/PycharmProjects/napalm/napalm/eos/eos.py in get_lldp_neighbors_detail(self, interface)
647 lldp_neighbors_out[interface] = []
648 capabilities = neighbor.get("systemCapabilities", {})
--> 649 available_capabilities = self._transform_lldp_capab(capabilities.keys())
650 enabled_capabilities = self._transform_lldp_capab(
651 [capab for capab, enabled in capabilities.items() if enabled]
/mnt/c/Users/bewing/PycharmProjects/napalm/napalm/eos/eos.py in _transform_lldp_capab(self, capabilities)
616
617 def _transform_lldp_capab(self, capabilities):
--> 618 return sorted([LLDP_CAPAB_TRANFORM_TABLE[c.lower()] for c in capabilities])
619
620 def get_lldp_neighbors_detail(self, interface=""):
/mnt/c/Users/bewing/PycharmProjects/napalm/napalm/eos/eos.py in <listcomp>(.0)
616
617 def _transform_lldp_capab(self, capabilities):
--> 618 return sorted([LLDP_CAPAB_TRANFORM_TABLE[c.lower()] for c in capabilities])
619
620 def get_lldp_neighbors_detail(self, interface=""):
KeyError: 'stationonly'
```
</issue>
<code>
[start of napalm/eos/constants.py]
1 # Based on:
2 # https://code.getnoc.com/noc/noc/blob/6f3db2a6e4b1ece77aaf4c4c98413e35ff64643a/sa/profiles/Arista/EOS/get_lldp_neighbors.py#L76-79
3 LLDP_CAPAB_TRANFORM_TABLE = {
4 "other": "other",
5 "repeater": "repeater",
6 "bridge": "bridge",
7 "wlanaccesspoint": "wlan-access-point",
8 "router": "router",
9 "telephone": "telephone",
10 "docsis": "docsis-cable-device",
11 "station": "station",
12 }
13
[end of napalm/eos/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/napalm/eos/constants.py b/napalm/eos/constants.py
--- a/napalm/eos/constants.py
+++ b/napalm/eos/constants.py
@@ -9,4 +9,5 @@
"telephone": "telephone",
"docsis": "docsis-cable-device",
"station": "station",
+ "stationonly": "station",
}
|
{"golden_diff": "diff --git a/napalm/eos/constants.py b/napalm/eos/constants.py\n--- a/napalm/eos/constants.py\n+++ b/napalm/eos/constants.py\n@@ -9,4 +9,5 @@\n \"telephone\": \"telephone\",\n \"docsis\": \"docsis-cable-device\",\n \"station\": \"station\",\n+ \"stationonly\": \"station\",\n }\n", "issue": "`get_lldp_neighbors_detail()` fails on Arista 7150S\n```python\r\nIn [1]: from napalm.eos import EOSDriver\r\n\r\nIn [2]: from getpass import getpass\r\n\r\nIn [3]: with EOSDriver(\"arista\", \"bewing\", getpass()) as d:\r\n ...: print(d.get_lldp_neighbors_detail())\r\n ...:\r\nPassword:\r\n---------------------------------------------------------------------------\r\nKeyError Traceback (most recent call last)\r\n<ipython-input-3-85f875e30fe3> in <module>\r\n 1 with EOSDriver(\"arista\", \"bewing\", getpass()) as d:\r\n----> 2 print(d.get_lldp_neighbors_detail())\r\n 3\r\n\r\n/mnt/c/Users/bewing/PycharmProjects/napalm/napalm/eos/eos.py in get_lldp_neighbors_detail(self, interface)\r\n 647 lldp_neighbors_out[interface] = []\r\n 648 capabilities = neighbor.get(\"systemCapabilities\", {})\r\n--> 649 available_capabilities = self._transform_lldp_capab(capabilities.keys())\r\n 650 enabled_capabilities = self._transform_lldp_capab(\r\n 651 [capab for capab, enabled in capabilities.items() if enabled]\r\n\r\n/mnt/c/Users/bewing/PycharmProjects/napalm/napalm/eos/eos.py in _transform_lldp_capab(self, capabilities)\r\n 616\r\n 617 def _transform_lldp_capab(self, capabilities):\r\n--> 618 return sorted([LLDP_CAPAB_TRANFORM_TABLE[c.lower()] for c in capabilities])\r\n 619\r\n 620 def get_lldp_neighbors_detail(self, interface=\"\"):\r\n\r\n/mnt/c/Users/bewing/PycharmProjects/napalm/napalm/eos/eos.py in <listcomp>(.0)\r\n 616\r\n 617 def _transform_lldp_capab(self, capabilities):\r\n--> 618 return sorted([LLDP_CAPAB_TRANFORM_TABLE[c.lower()] for c in capabilities])\r\n 619\r\n 620 def get_lldp_neighbors_detail(self, interface=\"\"):\r\n\r\nKeyError: 'stationonly'\r\n```\n", "before_files": [{"content": "# Based on:\n# https://code.getnoc.com/noc/noc/blob/6f3db2a6e4b1ece77aaf4c4c98413e35ff64643a/sa/profiles/Arista/EOS/get_lldp_neighbors.py#L76-79\nLLDP_CAPAB_TRANFORM_TABLE = {\n \"other\": \"other\",\n \"repeater\": \"repeater\",\n \"bridge\": \"bridge\",\n \"wlanaccesspoint\": \"wlan-access-point\",\n \"router\": \"router\",\n \"telephone\": \"telephone\",\n \"docsis\": \"docsis-cable-device\",\n \"station\": \"station\",\n}\n", "path": "napalm/eos/constants.py"}]}
| 1,208 | 86 |
gh_patches_debug_25892
|
rasdani/github-patches
|
git_diff
|
GPflow__GPflow-1582
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: Predictions with full_cov=True
The functions `model.predict_y()` and `model.predict_density()` do not work nicely with `full_cov=True`. For example, take `GPR`, where the `Gaussian` likelihood is used. In `predict_y()` the likelihood function `predict_mean_and_var()` is called. Here a scalar value of the likelihood variance is added to the covariance matrix. For `full_cov=True`, this should be added only to the diagonal.
I'm not sure what the right resolution for this is. In the short term, maybe throwing an error for `full_cov=True` would be helpful to show that this is unsupported.
Bug: Predictions with full_cov=True
The functions `model.predict_y()` and `model.predict_density()` do not work nicely with `full_cov=True`. For example, take `GPR`, where the `Gaussian` likelihood is used. In `predict_y()` the likelihood function `predict_mean_and_var()` is called. Here a scalar value of the likelihood variance is added to the covariance matrix. For `full_cov=True`, this should be added only to the diagonal.
I'm not sure what the right resolution for this is. In the short term, maybe throwing an error for `full_cov=True` would be helpful to show that this is unsupported.
</issue>
<code>
[start of gpflow/models/model.py]
1 # Copyright 2016-2020 The GPflow Contributors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import abc
16 from typing import Optional, Tuple
17
18 import tensorflow as tf
19
20 from ..base import Module
21 from ..conditionals.util import sample_mvn
22 from ..kernels import Kernel, MultioutputKernel
23 from ..likelihoods import Likelihood, SwitchedLikelihood
24 from ..mean_functions import MeanFunction, Zero
25 from ..utilities import to_default_float
26 from .training_mixins import InputData, RegressionData
27
28 MeanAndVariance = Tuple[tf.Tensor, tf.Tensor]
29
30
31 class BayesianModel(Module, metaclass=abc.ABCMeta):
32 """ Bayesian model. """
33
34 def log_prior_density(self) -> tf.Tensor:
35 """
36 Sum of the log prior probability densities of all (constrained) variables in this model.
37 """
38 if self.trainable_parameters:
39 return tf.add_n([p.log_prior_density() for p in self.trainable_parameters])
40 else:
41 return to_default_float(0.0)
42
43 def log_posterior_density(self, *args, **kwargs) -> tf.Tensor:
44 """
45 This may be the posterior with respect to the hyperparameters (e.g. for
46 GPR) or the posterior with respect to the function (e.g. for GPMC and
47 SGPMC). It assumes that maximum_log_likelihood_objective() is defined
48 sensibly.
49 """
50 return self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density()
51
52 def _training_loss(self, *args, **kwargs) -> tf.Tensor:
53 """
54 Training loss definition. To allow MAP (maximum a-posteriori) estimation,
55 adds the log density of all priors to maximum_log_likelihood_objective().
56 """
57 return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())
58
59 @abc.abstractmethod
60 def maximum_log_likelihood_objective(self, *args, **kwargs) -> tf.Tensor:
61 """
62 Objective for maximum likelihood estimation. Should be maximized. E.g.
63 log-marginal likelihood (hyperparameter likelihood) for GPR, or lower
64 bound to the log-marginal likelihood (ELBO) for sparse and variational
65 GPs.
66 """
67 raise NotImplementedError
68
69
70 class GPModel(BayesianModel):
71 r"""
72 A stateless base class for Gaussian process models, that is, those of the
73 form
74
75 .. math::
76 :nowrap:
77
78 \begin{align}
79 \theta & \sim p(\theta) \\
80 f & \sim \mathcal{GP}(m(x), k(x, x'; \theta)) \\
81 f_i & = f(x_i) \\
82 y_i \,|\, f_i & \sim p(y_i|f_i)
83 \end{align}
84
85 This class mostly adds functionality for predictions. To use it, inheriting
86 classes must define a predict_f function, which computes the means and
87 variances of the latent function.
88
89 These predictions are then pushed through the likelihood to obtain means
90 and variances of held out data, self.predict_y.
91
92 The predictions can also be used to compute the (log) density of held-out
93 data via self.predict_log_density.
94
95 It is also possible to draw samples from the latent GPs using
96 self.predict_f_samples.
97 """
98
99 def __init__(
100 self,
101 kernel: Kernel,
102 likelihood: Likelihood,
103 mean_function: Optional[MeanFunction] = None,
104 num_latent_gps: int = None,
105 ):
106 super().__init__()
107 assert num_latent_gps is not None, "GPModel requires specification of num_latent_gps"
108 self.num_latent_gps = num_latent_gps
109 if mean_function is None:
110 mean_function = Zero()
111 self.mean_function = mean_function
112 self.kernel = kernel
113 self.likelihood = likelihood
114
115 @staticmethod
116 def calc_num_latent_gps_from_data(data, kernel: Kernel, likelihood: Likelihood) -> int:
117 """
118 Calculates the number of latent GPs required based on the data as well
119 as the type of kernel and likelihood.
120 """
121 _, Y = data
122 output_dim = Y.shape[-1]
123 return GPModel.calc_num_latent_gps(kernel, likelihood, output_dim)
124
125 @staticmethod
126 def calc_num_latent_gps(kernel: Kernel, likelihood: Likelihood, output_dim: int) -> int:
127 """
128 Calculates the number of latent GPs required given the number of
129 outputs `output_dim` and the type of likelihood and kernel.
130
131 Note: It's not nice for `GPModel` to need to be aware of specific
132 likelihoods as here. However, `num_latent_gps` is a bit more broken in
133 general, we should fix this in the future. There are also some slightly
134 problematic assumptions re the output dimensions of mean_function.
135 See https://github.com/GPflow/GPflow/issues/1343
136 """
137 if isinstance(kernel, MultioutputKernel):
138 # MultioutputKernels already have num_latent_gps attributes
139 num_latent_gps = kernel.num_latent_gps
140 elif isinstance(likelihood, SwitchedLikelihood):
141 # the SwitchedLikelihood partitions/stitches based on the last
142 # column in Y, but we should not add a separate latent GP for this!
143 # hence decrement by 1
144 num_latent_gps = output_dim - 1
145 assert num_latent_gps > 0
146 else:
147 num_latent_gps = output_dim
148
149 return num_latent_gps
150
151 @abc.abstractmethod
152 def predict_f(
153 self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False
154 ) -> MeanAndVariance:
155 raise NotImplementedError
156
157 def predict_f_samples(
158 self,
159 Xnew: InputData,
160 num_samples: Optional[int] = None,
161 full_cov: bool = True,
162 full_output_cov: bool = False,
163 ) -> tf.Tensor:
164 """
165 Produce samples from the posterior latent function(s) at the input points.
166
167 :param Xnew: InputData
168 Input locations at which to draw samples, shape [..., N, D]
169 where N is the number of rows and D is the input dimension of each point.
170 :param num_samples:
171 Number of samples to draw.
172 If `None`, a single sample is drawn and the return shape is [..., N, P],
173 for any positive integer the return shape contains an extra batch
174 dimension, [..., S, N, P], with S = num_samples and P is the number of outputs.
175 :param full_cov:
176 If True, draw correlated samples over the inputs. Computes the Cholesky over the
177 dense covariance matrix of size [num_data, num_data].
178 If False, draw samples that are uncorrelated over the inputs.
179 :param full_output_cov:
180 If True, draw correlated samples over the outputs.
181 If False, draw samples that are uncorrelated over the outputs.
182
183 Currently, the method does not support `full_output_cov=True` and `full_cov=True`.
184 """
185 if full_cov and full_output_cov:
186 raise NotImplementedError(
187 "The combination of both `full_cov` and `full_output_cov` is not supported."
188 )
189
190 # check below for shape info
191 mean, cov = self.predict_f(Xnew, full_cov=full_cov, full_output_cov=full_output_cov)
192 if full_cov:
193 # mean: [..., N, P]
194 # cov: [..., P, N, N]
195 mean_for_sample = tf.linalg.adjoint(mean) # [..., P, N]
196 samples = sample_mvn(
197 mean_for_sample, cov, full_cov, num_samples=num_samples
198 ) # [..., (S), P, N]
199 samples = tf.linalg.adjoint(samples) # [..., (S), N, P]
200 else:
201 # mean: [..., N, P]
202 # cov: [..., N, P] or [..., N, P, P]
203 samples = sample_mvn(
204 mean, cov, full_output_cov, num_samples=num_samples
205 ) # [..., (S), N, P]
206 return samples # [..., (S), N, P]
207
208 def predict_y(
209 self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False
210 ) -> MeanAndVariance:
211 """
212 Compute the mean and variance of the held-out data at the input points.
213 """
214 f_mean, f_var = self.predict_f(Xnew, full_cov=full_cov, full_output_cov=full_output_cov)
215 return self.likelihood.predict_mean_and_var(f_mean, f_var)
216
217 def predict_log_density(
218 self, data: RegressionData, full_cov: bool = False, full_output_cov: bool = False
219 ) -> tf.Tensor:
220 """
221 Compute the log density of the data at the new data points.
222 """
223 X, Y = data
224 f_mean, f_var = self.predict_f(X, full_cov=full_cov, full_output_cov=full_output_cov)
225 return self.likelihood.predict_log_density(f_mean, f_var, Y)
226
[end of gpflow/models/model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gpflow/models/model.py b/gpflow/models/model.py
--- a/gpflow/models/model.py
+++ b/gpflow/models/model.py
@@ -211,6 +211,12 @@
"""
Compute the mean and variance of the held-out data at the input points.
"""
+ if full_cov or full_output_cov:
+ # See https://github.com/GPflow/GPflow/issues/1461
+ raise NotImplementedError(
+ "The predict_y method currently supports only the argument values full_cov=False and full_output_cov=False"
+ )
+
f_mean, f_var = self.predict_f(Xnew, full_cov=full_cov, full_output_cov=full_output_cov)
return self.likelihood.predict_mean_and_var(f_mean, f_var)
@@ -220,6 +226,12 @@
"""
Compute the log density of the data at the new data points.
"""
+ if full_cov or full_output_cov:
+ # See https://github.com/GPflow/GPflow/issues/1461
+ raise NotImplementedError(
+ "The predict_log_density method currently supports only the argument values full_cov=False and full_output_cov=False"
+ )
+
X, Y = data
f_mean, f_var = self.predict_f(X, full_cov=full_cov, full_output_cov=full_output_cov)
return self.likelihood.predict_log_density(f_mean, f_var, Y)
|
{"golden_diff": "diff --git a/gpflow/models/model.py b/gpflow/models/model.py\n--- a/gpflow/models/model.py\n+++ b/gpflow/models/model.py\n@@ -211,6 +211,12 @@\n \"\"\"\n Compute the mean and variance of the held-out data at the input points.\n \"\"\"\n+ if full_cov or full_output_cov:\n+ # See https://github.com/GPflow/GPflow/issues/1461\n+ raise NotImplementedError(\n+ \"The predict_y method currently supports only the argument values full_cov=False and full_output_cov=False\"\n+ )\n+\n f_mean, f_var = self.predict_f(Xnew, full_cov=full_cov, full_output_cov=full_output_cov)\n return self.likelihood.predict_mean_and_var(f_mean, f_var)\n \n@@ -220,6 +226,12 @@\n \"\"\"\n Compute the log density of the data at the new data points.\n \"\"\"\n+ if full_cov or full_output_cov:\n+ # See https://github.com/GPflow/GPflow/issues/1461\n+ raise NotImplementedError(\n+ \"The predict_log_density method currently supports only the argument values full_cov=False and full_output_cov=False\"\n+ )\n+\n X, Y = data\n f_mean, f_var = self.predict_f(X, full_cov=full_cov, full_output_cov=full_output_cov)\n return self.likelihood.predict_log_density(f_mean, f_var, Y)\n", "issue": "Bug: Predictions with full_cov=True\nThe functions `model.predict_y()` and `model.predict_density()` do not work nicely with `full_cov=True`. For example, take `GPR`, where the `Gaussian` likelihood is used. In `predict_y()` the likelihood function `predict_mean_and_var()` is called. Here a scalar value of the likelihood variance is added to the covariance matrix. For `full_cov=True`, this should be added only to the diagonal.\r\n\r\nI'm not sure what the right resolution for this is. In the short term, maybe throwing an error for `full_cov=True` would be helpful to show that this is unsupported.\nBug: Predictions with full_cov=True\nThe functions `model.predict_y()` and `model.predict_density()` do not work nicely with `full_cov=True`. For example, take `GPR`, where the `Gaussian` likelihood is used. In `predict_y()` the likelihood function `predict_mean_and_var()` is called. Here a scalar value of the likelihood variance is added to the covariance matrix. For `full_cov=True`, this should be added only to the diagonal.\r\n\r\nI'm not sure what the right resolution for this is. In the short term, maybe throwing an error for `full_cov=True` would be helpful to show that this is unsupported.\n", "before_files": [{"content": "# Copyright 2016-2020 The GPflow Contributors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport abc\nfrom typing import Optional, Tuple\n\nimport tensorflow as tf\n\nfrom ..base import Module\nfrom ..conditionals.util import sample_mvn\nfrom ..kernels import Kernel, MultioutputKernel\nfrom ..likelihoods import Likelihood, SwitchedLikelihood\nfrom ..mean_functions import MeanFunction, Zero\nfrom ..utilities import to_default_float\nfrom .training_mixins import InputData, RegressionData\n\nMeanAndVariance = Tuple[tf.Tensor, tf.Tensor]\n\n\nclass BayesianModel(Module, metaclass=abc.ABCMeta):\n \"\"\" Bayesian model. \"\"\"\n\n def log_prior_density(self) -> tf.Tensor:\n \"\"\"\n Sum of the log prior probability densities of all (constrained) variables in this model.\n \"\"\"\n if self.trainable_parameters:\n return tf.add_n([p.log_prior_density() for p in self.trainable_parameters])\n else:\n return to_default_float(0.0)\n\n def log_posterior_density(self, *args, **kwargs) -> tf.Tensor:\n \"\"\"\n This may be the posterior with respect to the hyperparameters (e.g. for\n GPR) or the posterior with respect to the function (e.g. for GPMC and\n SGPMC). It assumes that maximum_log_likelihood_objective() is defined\n sensibly.\n \"\"\"\n return self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density()\n\n def _training_loss(self, *args, **kwargs) -> tf.Tensor:\n \"\"\"\n Training loss definition. To allow MAP (maximum a-posteriori) estimation,\n adds the log density of all priors to maximum_log_likelihood_objective().\n \"\"\"\n return -(self.maximum_log_likelihood_objective(*args, **kwargs) + self.log_prior_density())\n\n @abc.abstractmethod\n def maximum_log_likelihood_objective(self, *args, **kwargs) -> tf.Tensor:\n \"\"\"\n Objective for maximum likelihood estimation. Should be maximized. E.g.\n log-marginal likelihood (hyperparameter likelihood) for GPR, or lower\n bound to the log-marginal likelihood (ELBO) for sparse and variational\n GPs.\n \"\"\"\n raise NotImplementedError\n\n\nclass GPModel(BayesianModel):\n r\"\"\"\n A stateless base class for Gaussian process models, that is, those of the\n form\n\n .. math::\n :nowrap:\n\n \\begin{align}\n \\theta & \\sim p(\\theta) \\\\\n f & \\sim \\mathcal{GP}(m(x), k(x, x'; \\theta)) \\\\\n f_i & = f(x_i) \\\\\n y_i \\,|\\, f_i & \\sim p(y_i|f_i)\n \\end{align}\n\n This class mostly adds functionality for predictions. 
To use it, inheriting\n classes must define a predict_f function, which computes the means and\n variances of the latent function.\n\n These predictions are then pushed through the likelihood to obtain means\n and variances of held out data, self.predict_y.\n\n The predictions can also be used to compute the (log) density of held-out\n data via self.predict_log_density.\n\n It is also possible to draw samples from the latent GPs using\n self.predict_f_samples.\n \"\"\"\n\n def __init__(\n self,\n kernel: Kernel,\n likelihood: Likelihood,\n mean_function: Optional[MeanFunction] = None,\n num_latent_gps: int = None,\n ):\n super().__init__()\n assert num_latent_gps is not None, \"GPModel requires specification of num_latent_gps\"\n self.num_latent_gps = num_latent_gps\n if mean_function is None:\n mean_function = Zero()\n self.mean_function = mean_function\n self.kernel = kernel\n self.likelihood = likelihood\n\n @staticmethod\n def calc_num_latent_gps_from_data(data, kernel: Kernel, likelihood: Likelihood) -> int:\n \"\"\"\n Calculates the number of latent GPs required based on the data as well\n as the type of kernel and likelihood.\n \"\"\"\n _, Y = data\n output_dim = Y.shape[-1]\n return GPModel.calc_num_latent_gps(kernel, likelihood, output_dim)\n\n @staticmethod\n def calc_num_latent_gps(kernel: Kernel, likelihood: Likelihood, output_dim: int) -> int:\n \"\"\"\n Calculates the number of latent GPs required given the number of\n outputs `output_dim` and the type of likelihood and kernel.\n\n Note: It's not nice for `GPModel` to need to be aware of specific\n likelihoods as here. However, `num_latent_gps` is a bit more broken in\n general, we should fix this in the future. There are also some slightly\n problematic assumptions re the output dimensions of mean_function.\n See https://github.com/GPflow/GPflow/issues/1343\n \"\"\"\n if isinstance(kernel, MultioutputKernel):\n # MultioutputKernels already have num_latent_gps attributes\n num_latent_gps = kernel.num_latent_gps\n elif isinstance(likelihood, SwitchedLikelihood):\n # the SwitchedLikelihood partitions/stitches based on the last\n # column in Y, but we should not add a separate latent GP for this!\n # hence decrement by 1\n num_latent_gps = output_dim - 1\n assert num_latent_gps > 0\n else:\n num_latent_gps = output_dim\n\n return num_latent_gps\n\n @abc.abstractmethod\n def predict_f(\n self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False\n ) -> MeanAndVariance:\n raise NotImplementedError\n\n def predict_f_samples(\n self,\n Xnew: InputData,\n num_samples: Optional[int] = None,\n full_cov: bool = True,\n full_output_cov: bool = False,\n ) -> tf.Tensor:\n \"\"\"\n Produce samples from the posterior latent function(s) at the input points.\n\n :param Xnew: InputData\n Input locations at which to draw samples, shape [..., N, D]\n where N is the number of rows and D is the input dimension of each point.\n :param num_samples:\n Number of samples to draw.\n If `None`, a single sample is drawn and the return shape is [..., N, P],\n for any positive integer the return shape contains an extra batch\n dimension, [..., S, N, P], with S = num_samples and P is the number of outputs.\n :param full_cov:\n If True, draw correlated samples over the inputs. 
Computes the Cholesky over the\n dense covariance matrix of size [num_data, num_data].\n If False, draw samples that are uncorrelated over the inputs.\n :param full_output_cov:\n If True, draw correlated samples over the outputs.\n If False, draw samples that are uncorrelated over the outputs.\n\n Currently, the method does not support `full_output_cov=True` and `full_cov=True`.\n \"\"\"\n if full_cov and full_output_cov:\n raise NotImplementedError(\n \"The combination of both `full_cov` and `full_output_cov` is not supported.\"\n )\n\n # check below for shape info\n mean, cov = self.predict_f(Xnew, full_cov=full_cov, full_output_cov=full_output_cov)\n if full_cov:\n # mean: [..., N, P]\n # cov: [..., P, N, N]\n mean_for_sample = tf.linalg.adjoint(mean) # [..., P, N]\n samples = sample_mvn(\n mean_for_sample, cov, full_cov, num_samples=num_samples\n ) # [..., (S), P, N]\n samples = tf.linalg.adjoint(samples) # [..., (S), N, P]\n else:\n # mean: [..., N, P]\n # cov: [..., N, P] or [..., N, P, P]\n samples = sample_mvn(\n mean, cov, full_output_cov, num_samples=num_samples\n ) # [..., (S), N, P]\n return samples # [..., (S), N, P]\n\n def predict_y(\n self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False\n ) -> MeanAndVariance:\n \"\"\"\n Compute the mean and variance of the held-out data at the input points.\n \"\"\"\n f_mean, f_var = self.predict_f(Xnew, full_cov=full_cov, full_output_cov=full_output_cov)\n return self.likelihood.predict_mean_and_var(f_mean, f_var)\n\n def predict_log_density(\n self, data: RegressionData, full_cov: bool = False, full_output_cov: bool = False\n ) -> tf.Tensor:\n \"\"\"\n Compute the log density of the data at the new data points.\n \"\"\"\n X, Y = data\n f_mean, f_var = self.predict_f(X, full_cov=full_cov, full_output_cov=full_output_cov)\n return self.likelihood.predict_log_density(f_mean, f_var, Y)\n", "path": "gpflow/models/model.py"}]}
| 3,493 | 321 |
gh_patches_debug_34642
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-1571
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tracer and Meter provider lack environment variables
Now that `Configuration` has been removed, both providers should use consistent environment variables.
</issue>
<code>
[start of opentelemetry-api/src/opentelemetry/environment_variables/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 OTEL_PROPAGATORS = "OTEL_PROPAGATORS"
16 OTEL_PYTHON_CONTEXT = "OTEL_PYTHON_CONTEXT"
17 OTEL_PYTHON_DISABLED_INSTRUMENTATIONS = "OTEL_PYTHON_DISABLED_INSTRUMENTATIONS"
18 OTEL_PYTHON_IDS_GENERATOR = "OTEL_PYTHON_IDS_GENERATOR"
19 OTEL_PYTHON_SERVICE_NAME = "OTEL_PYTHON_SERVICE_NAME"
20 OTEL_TRACE_EXPORTER = "OTEL_TRACE_EXPORTER"
21 OTEL_METRICS_EXPORTER = "OTEL_METRICS_EXPORTER"
22
[end of opentelemetry-api/src/opentelemetry/environment_variables/__init__.py]
[start of opentelemetry-api/src/opentelemetry/util/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import time
16 from logging import getLogger
17 from os import environ
18 from typing import TYPE_CHECKING, Union, cast
19
20 from pkg_resources import iter_entry_points
21
22 if TYPE_CHECKING:
23 from opentelemetry.metrics import MeterProvider
24 from opentelemetry.trace import TracerProvider
25
26 Provider = Union["TracerProvider", "MeterProvider"]
27
28 logger = getLogger(__name__)
29
30 # Since we want API users to be able to provide timestamps,
31 # this needs to be in the API.
32
33 try:
34 time_ns = time.time_ns
35 # Python versions < 3.7
36 except AttributeError:
37
38 def time_ns() -> int:
39 return int(time.time() * 1e9)
40
41
42 def _load_provider(provider: str) -> Provider:
43 try:
44 entry_point = next(
45 iter_entry_points(
46 "opentelemetry_{}".format(provider),
47 name=cast(
48 str,
49 environ.get(
50 provider.upper(), "default_{}".format(provider),
51 ),
52 ),
53 )
54 )
55 return cast(Provider, entry_point.load()(),)
56 except Exception: # pylint: disable=broad-except
57 logger.error("Failed to load configured provider %s", provider)
58 raise
59
60
61 def _load_meter_provider(provider: str) -> "MeterProvider":
62 return cast("MeterProvider", _load_provider(provider))
63
64
65 def _load_trace_provider(provider: str) -> "TracerProvider":
66 return cast("TracerProvider", _load_provider(provider))
67
[end of opentelemetry-api/src/opentelemetry/util/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opentelemetry-api/src/opentelemetry/environment_variables/__init__.py b/opentelemetry-api/src/opentelemetry/environment_variables/__init__.py
--- a/opentelemetry-api/src/opentelemetry/environment_variables/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/environment_variables/__init__.py
@@ -19,3 +19,5 @@
OTEL_PYTHON_SERVICE_NAME = "OTEL_PYTHON_SERVICE_NAME"
OTEL_TRACE_EXPORTER = "OTEL_TRACE_EXPORTER"
OTEL_METRICS_EXPORTER = "OTEL_METRICS_EXPORTER"
+OTEL_PYTHON_TRACER_PROVIDER = "OTEL_PYTHON_TRACER_PROVIDER"
+OTEL_PYTHON_METER_PROVIDER = "OTEL_PYTHON_METER_PROVIDER"
diff --git a/opentelemetry-api/src/opentelemetry/util/__init__.py b/opentelemetry-api/src/opentelemetry/util/__init__.py
--- a/opentelemetry-api/src/opentelemetry/util/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/util/__init__.py
@@ -19,6 +19,11 @@
from pkg_resources import iter_entry_points
+from opentelemetry.environment_variables import (
+ OTEL_PYTHON_METER_PROVIDER,
+ OTEL_PYTHON_TRACER_PROVIDER,
+)
+
if TYPE_CHECKING:
from opentelemetry.metrics import MeterProvider
from opentelemetry.trace import TracerProvider
@@ -39,7 +44,9 @@
return int(time.time() * 1e9)
-def _load_provider(provider: str) -> Provider:
+def _load_provider(
+ provider_environment_variable: str, provider: str
+) -> Provider:
try:
entry_point = next(
iter_entry_points(
@@ -47,7 +54,8 @@
name=cast(
str,
environ.get(
- provider.upper(), "default_{}".format(provider),
+ provider_environment_variable,
+ "default_{}".format(provider),
),
),
)
@@ -59,8 +67,13 @@
def _load_meter_provider(provider: str) -> "MeterProvider":
- return cast("MeterProvider", _load_provider(provider))
+ return cast(
+ "MeterProvider", _load_provider(OTEL_PYTHON_METER_PROVIDER, provider),
+ )
def _load_trace_provider(provider: str) -> "TracerProvider":
- return cast("TracerProvider", _load_provider(provider))
+ return cast(
+ "TracerProvider",
+ _load_provider(OTEL_PYTHON_TRACER_PROVIDER, provider),
+ )
|
{"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/environment_variables/__init__.py b/opentelemetry-api/src/opentelemetry/environment_variables/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/environment_variables/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/environment_variables/__init__.py\n@@ -19,3 +19,5 @@\n OTEL_PYTHON_SERVICE_NAME = \"OTEL_PYTHON_SERVICE_NAME\"\n OTEL_TRACE_EXPORTER = \"OTEL_TRACE_EXPORTER\"\n OTEL_METRICS_EXPORTER = \"OTEL_METRICS_EXPORTER\"\n+OTEL_PYTHON_TRACER_PROVIDER = \"OTEL_PYTHON_TRACER_PROVIDER\"\n+OTEL_PYTHON_METER_PROVIDER = \"OTEL_PYTHON_METER_PROVIDER\"\ndiff --git a/opentelemetry-api/src/opentelemetry/util/__init__.py b/opentelemetry-api/src/opentelemetry/util/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/util/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/util/__init__.py\n@@ -19,6 +19,11 @@\n \n from pkg_resources import iter_entry_points\n \n+from opentelemetry.environment_variables import (\n+ OTEL_PYTHON_METER_PROVIDER,\n+ OTEL_PYTHON_TRACER_PROVIDER,\n+)\n+\n if TYPE_CHECKING:\n from opentelemetry.metrics import MeterProvider\n from opentelemetry.trace import TracerProvider\n@@ -39,7 +44,9 @@\n return int(time.time() * 1e9)\n \n \n-def _load_provider(provider: str) -> Provider:\n+def _load_provider(\n+ provider_environment_variable: str, provider: str\n+) -> Provider:\n try:\n entry_point = next(\n iter_entry_points(\n@@ -47,7 +54,8 @@\n name=cast(\n str,\n environ.get(\n- provider.upper(), \"default_{}\".format(provider),\n+ provider_environment_variable,\n+ \"default_{}\".format(provider),\n ),\n ),\n )\n@@ -59,8 +67,13 @@\n \n \n def _load_meter_provider(provider: str) -> \"MeterProvider\":\n- return cast(\"MeterProvider\", _load_provider(provider))\n+ return cast(\n+ \"MeterProvider\", _load_provider(OTEL_PYTHON_METER_PROVIDER, provider),\n+ )\n \n \n def _load_trace_provider(provider: str) -> \"TracerProvider\":\n- return cast(\"TracerProvider\", _load_provider(provider))\n+ return cast(\n+ \"TracerProvider\",\n+ _load_provider(OTEL_PYTHON_TRACER_PROVIDER, provider),\n+ )\n", "issue": "Tracer and Meter provider lack environment variables\nNow that `Configuration` has been removed, both providers should use consistent environment variables.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nOTEL_PROPAGATORS = \"OTEL_PROPAGATORS\"\nOTEL_PYTHON_CONTEXT = \"OTEL_PYTHON_CONTEXT\"\nOTEL_PYTHON_DISABLED_INSTRUMENTATIONS = \"OTEL_PYTHON_DISABLED_INSTRUMENTATIONS\"\nOTEL_PYTHON_IDS_GENERATOR = \"OTEL_PYTHON_IDS_GENERATOR\"\nOTEL_PYTHON_SERVICE_NAME = \"OTEL_PYTHON_SERVICE_NAME\"\nOTEL_TRACE_EXPORTER = \"OTEL_TRACE_EXPORTER\"\nOTEL_METRICS_EXPORTER = \"OTEL_METRICS_EXPORTER\"\n", "path": "opentelemetry-api/src/opentelemetry/environment_variables/__init__.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance 
with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nfrom logging import getLogger\nfrom os import environ\nfrom typing import TYPE_CHECKING, Union, cast\n\nfrom pkg_resources import iter_entry_points\n\nif TYPE_CHECKING:\n from opentelemetry.metrics import MeterProvider\n from opentelemetry.trace import TracerProvider\n\nProvider = Union[\"TracerProvider\", \"MeterProvider\"]\n\nlogger = getLogger(__name__)\n\n# Since we want API users to be able to provide timestamps,\n# this needs to be in the API.\n\ntry:\n time_ns = time.time_ns\n# Python versions < 3.7\nexcept AttributeError:\n\n def time_ns() -> int:\n return int(time.time() * 1e9)\n\n\ndef _load_provider(provider: str) -> Provider:\n try:\n entry_point = next(\n iter_entry_points(\n \"opentelemetry_{}\".format(provider),\n name=cast(\n str,\n environ.get(\n provider.upper(), \"default_{}\".format(provider),\n ),\n ),\n )\n )\n return cast(Provider, entry_point.load()(),)\n except Exception: # pylint: disable=broad-except\n logger.error(\"Failed to load configured provider %s\", provider)\n raise\n\n\ndef _load_meter_provider(provider: str) -> \"MeterProvider\":\n return cast(\"MeterProvider\", _load_provider(provider))\n\n\ndef _load_trace_provider(provider: str) -> \"TracerProvider\":\n return cast(\"TracerProvider\", _load_provider(provider))\n", "path": "opentelemetry-api/src/opentelemetry/util/__init__.py"}]}
| 1,453 | 566 |
gh_patches_debug_18009
|
rasdani/github-patches
|
git_diff
|
kivy__kivy-934
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No console warning when setting Image.source = "some/file/that/does/not/exist"
When you load a file into an Image widget by setting .source = "some/file/name.png" there is a message printed on the console:
[DEBUG ] [ImagePygame ] Load <data/tiles/dirt.png>
If you set .source = to a filename that does not exist, there is no feedback. No debug message is printed, and the Image widget remains blank.
</issue>
<code>
[start of kivy/uix/image.py]
1 '''
2 Image
3 =====
4
5 The :class:`Image` widget is used to display an image::
6
7 wimg = Image(source='mylogo.png')
8
9 Asynchronous Loading
10 --------------------
11
12 To load an image asynchronously (for example from an external webserver), use
13 the :class:`AsyncImage` subclass::
14
15 aimg = AsyncImage(source='http://mywebsite.com/logo.png')
16
17 Alignment
18 ---------
19
20 By default, the image is centered and fit inside the widget bounding box.
21 If you don't want that, you can inherit from Image and create your own style.
22
23 For example, if you want your image to take the same size of your widget, you
24 can do::
25
26 class FullImage(Image):
27 pass
28
29 And in your kivy language file, you can do::
30
31 <FullImage>:
32 canvas:
33 Color:
34 rgb: (1, 1, 1)
35 Rectangle:
36 texture: self.texture
37 size: self.size
38 pos: self.pos
39
40 '''
41
42 __all__ = ('Image', 'AsyncImage')
43
44 from kivy.uix.widget import Widget
45 from kivy.core.image import Image as CoreImage
46 from kivy.resources import resource_find
47 from kivy.properties import StringProperty, ObjectProperty, ListProperty, \
48 AliasProperty, BooleanProperty, NumericProperty
49 from kivy.loader import Loader
50
51
52 class Image(Widget):
53 '''Image class, see module documentation for more information.
54 '''
55
56 source = StringProperty(None)
57 '''Filename / source of your image.
58
59 :data:`source` is a :class:`~kivy.properties.StringProperty`, default to
60 None.
61 '''
62
63 texture = ObjectProperty(None, allownone=True)
64 '''Texture object of the image.
65
66 Depending of the texture creation, the value will be a
67 :class:`~kivy.graphics.texture.Texture` or
68 :class:`~kivy.graphics.texture.TextureRegion` object.
69
70 :data:`texture` is a :class:`~kivy.properties.ObjectProperty`, default to
71 None.
72 '''
73
74 texture_size = ListProperty([0, 0])
75 '''Texture size of the image.
76
77 .. warning::
78
79 The texture size is set after the texture property. So if you listen on
80 the change to :data:`texture`, the property texture_size will not be
81 up-to-date. Use self.texture.size instead.
82 '''
83
84 def get_image_ratio(self):
85 if self.texture:
86 return self.texture.width / float(self.texture.height)
87 return 1.
88
89 mipmap = BooleanProperty(False)
90 '''Indicate if you want OpenGL mipmapping to be applied on the texture.
91 Read :ref:`mipmap` for more information.
92
93 .. versionadded:: 1.0.7
94
95 :data:`mipmap` is a :class:`~kivy.properties.BooleanProperty`, default to
96 False.
97 '''
98
99 image_ratio = AliasProperty(get_image_ratio, None, bind=('texture', ))
100 '''Ratio of the image (width / float(height).
101
102 :data:`image_ratio` is a :class:`~kivy.properties.AliasProperty`, and is
103 read-only.
104 '''
105
106 color = ListProperty([1, 1, 1, 1])
107 '''Image color, in the format (r, g, b, a). This attribute can be used to
108 'tint' an image. Be careful, if the source image is not gray/white, the
109 color will not really work as expected.
110
111 .. versionadded:: 1.0.6
112
113 :data:`color` is a :class:`~kivy.properties.ListProperty`, default to [1, 1,
114 1, 1].
115 '''
116
117 allow_stretch = BooleanProperty(False)
118 '''If True, the normalized image size will be maximized to fit in the image
119 box. Otherwise, if the box is too tall, the image will not be stretched more
120 than 1:1 pixels.
121
122 .. versionadded:: 1.0.7
123
124 :data:`allow_stretch` is a :class:`~kivy.properties.BooleanProperty`,
125 default to False
126 '''
127
128 keep_ratio = BooleanProperty(True)
129 '''If False along with allow_stretch being True, the normalized image
130 size will be maximized to fit in the image box, disregarding the aspect
131 ratio of the image.
132 Otherwise, if the box is too tall, the image will not be stretched more
133 than 1:1 pixels.
134
135 .. versionadded:: 1.0.8
136
137 :data:`keep_ratio` is a :class:`~kivy.properties.BooleanProperty`,
138 default to True
139 '''
140
141 keep_data = BooleanProperty(False)
142 '''If true the underlaying _coreimage have to keep the raw image data.
143 Useful to perform pixel based collision detection
144
145 .. versionadded:: 1.3.0
146
147 :data:`keep_data` is a :class:`~kivy.properties.BooleanProperty`, default
148 to False
149 '''
150
151 anim_delay = NumericProperty(.25)
152 '''Delay of animation if the image is sequenced (like an animated gif).
153 If the anim_delay is set to -1, the animation will be stopped.
154
155 .. versionadded:: 1.0.8
156
157 :data:`anim_delay` is a :class:`~kivy.properties.NumericProperty`, default
158 to .25 (4 FPS)
159 '''
160
161 def get_norm_image_size(self):
162 if not self.texture:
163 return self.size
164 ratio = self.image_ratio
165 w, h = self.size
166 tw, th = self.texture.size
167
168 # ensure that the width is always maximized to the containter width
169 if self.allow_stretch:
170 if not self.keep_ratio:
171 return w, h
172 iw = w
173 else:
174 iw = min(w, tw)
175 # calculate the appropriate height
176 ih = iw / ratio
177 # if the height is too higher, take the height of the container
178 # and calculate appropriate width. no need to test further. :)
179 if ih > h:
180 if self.allow_stretch:
181 ih = h
182 else:
183 ih = min(h, th)
184 iw = ih * ratio
185
186 return iw, ih
187
188 norm_image_size = AliasProperty(get_norm_image_size, None, bind=(
189 'texture', 'size', 'image_ratio', 'allow_stretch'))
190 '''Normalized image size within the widget box.
191
192 This size will always be fit to the widget size, and will preserve the image
193 ratio.
194
195 :data:`norm_image_size` is a :class:`~kivy.properties.AliasProperty`, and is
196 read-only.
197 '''
198
199 def __init__(self, **kwargs):
200 self._coreimage = None
201 super(Image, self).__init__(**kwargs)
202 self.bind(source=self.texture_update,
203 mipmap=self.texture_update)
204 if self.source:
205 self.texture_update()
206
207 def texture_update(self, *largs):
208 if not self.source:
209 self.texture = None
210 else:
211 filename = resource_find(self.source)
212 if filename is None:
213 return
214 mipmap = self.mipmap
215 if self._coreimage is not None:
216 self._coreimage.unbind(on_texture=self._on_tex_change)
217 self._coreimage = ci = CoreImage(filename, mipmap=mipmap,
218 anim_delay=self.anim_delay, keep_data=self.keep_data)
219 ci.bind(on_texture=self._on_tex_change)
220 self.texture = ci.texture
221
222 def on_anim_delay(self, instance, value):
223 if self._coreimage is None:
224 return
225 self._coreimage.anim_delay = value
226 if value < 0:
227 self._coreimage.anim_reset(False)
228
229 def on_texture(self, instance, value):
230 if value is not None:
231 self.texture_size = list(value.size)
232
233 def _on_tex_change(self, *largs):
234 # update texture from core image
235 self.texture = self._coreimage.texture
236
237 def reload(self):
238 '''Reload image from disk. This facilitates re-loading of
239 image from disk in case contents change.
240
241 .. versionadded:: 1.3.0
242
243 Usage::
244
245 im = Image(source = '1.jpg')
246 # -- do something --
247 im.reload()
248 # image will be re-loaded from disk
249
250 '''
251 self._coreimage.remove_from_cache()
252 olsource = self.source
253 self.source = ''
254 self.source = olsource
255
256
257 class AsyncImage(Image):
258 '''Asynchronous Image class. See module documentation for more information.
259 '''
260
261 def __init__(self, **kwargs):
262 self._coreimage = None
263 super(AsyncImage, self).__init__(**kwargs)
264 self.unbind(source=self.texture_update,
265 mipmap=self.texture_update)
266
267 def on_source(self, instance, value):
268 if not value:
269 if self._coreimage is not None:
270 self._coreimage.unbind(on_texture=self._on_tex_change)
271 self.texture = None
272 self._coreimage = None
273 else:
274 if not self.is_uri(value):
275 value = resource_find(value)
276 self._coreimage = image = Loader.image(value)
277 image.bind(on_load=self.on_source_load)
278 image.bind(on_texture=self._on_tex_change)
279 self.texture = image.texture
280
281 def on_source_load(self, value):
282 image = self._coreimage.image
283 if not image:
284 return
285 self.texture = image.texture
286
287 def is_uri(self, filename):
288 proto = filename.split('://', 1)[0]
289 return proto in ('http', 'https', 'ftp', 'smb')
290
291 def _on_tex_change(self, *largs):
292 if self._coreimage:
293 self.texture = self._coreimage.texture
294
[end of kivy/uix/image.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kivy/uix/image.py b/kivy/uix/image.py
--- a/kivy/uix/image.py
+++ b/kivy/uix/image.py
@@ -47,6 +47,7 @@
from kivy.properties import StringProperty, ObjectProperty, ListProperty, \
AliasProperty, BooleanProperty, NumericProperty
from kivy.loader import Loader
+from kivy.logger import Logger
class Image(Widget):
@@ -210,7 +211,8 @@
else:
filename = resource_find(self.source)
if filename is None:
- return
+ return Logger.error('Image: Error reading file {filename}'.
+ format(filename=self.source))
mipmap = self.mipmap
if self._coreimage is not None:
self._coreimage.unbind(on_texture=self._on_tex_change)
|
{"golden_diff": "diff --git a/kivy/uix/image.py b/kivy/uix/image.py\n--- a/kivy/uix/image.py\n+++ b/kivy/uix/image.py\n@@ -47,6 +47,7 @@\n from kivy.properties import StringProperty, ObjectProperty, ListProperty, \\\n AliasProperty, BooleanProperty, NumericProperty\n from kivy.loader import Loader\n+from kivy.logger import Logger\n \n \n class Image(Widget):\n@@ -210,7 +211,8 @@\n else:\n filename = resource_find(self.source)\n if filename is None:\n- return\n+ return Logger.error('Image: Error reading file {filename}'.\n+ format(filename=self.source))\n mipmap = self.mipmap\n if self._coreimage is not None:\n self._coreimage.unbind(on_texture=self._on_tex_change)\n", "issue": "No console warning when setting Image.source = \"some/file/that/does/not/exist\"\nWhen you load a file into an Image widget by setting .source = \"some/file/name.png\" there is a message printed on the console:\n\n[DEBUG ] [ImagePygame ] Load <data/tiles/dirt.png>\n\nIf you set .source = to a filename that does not exist, there is no feedback. No debug message is printed, and the Image widget remains blank.\n\n", "before_files": [{"content": "'''\nImage\n=====\n\nThe :class:`Image` widget is used to display an image::\n\n wimg = Image(source='mylogo.png')\n\nAsynchronous Loading\n--------------------\n\nTo load an image asynchronously (for example from an external webserver), use\nthe :class:`AsyncImage` subclass::\n\n aimg = AsyncImage(source='http://mywebsite.com/logo.png')\n\nAlignment\n---------\n\nBy default, the image is centered and fit inside the widget bounding box.\nIf you don't want that, you can inherit from Image and create your own style.\n\nFor example, if you want your image to take the same size of your widget, you\ncan do::\n\n class FullImage(Image):\n pass\n\nAnd in your kivy language file, you can do::\n\n <FullImage>:\n canvas:\n Color:\n rgb: (1, 1, 1)\n Rectangle:\n texture: self.texture\n size: self.size\n pos: self.pos\n\n'''\n\n__all__ = ('Image', 'AsyncImage')\n\nfrom kivy.uix.widget import Widget\nfrom kivy.core.image import Image as CoreImage\nfrom kivy.resources import resource_find\nfrom kivy.properties import StringProperty, ObjectProperty, ListProperty, \\\n AliasProperty, BooleanProperty, NumericProperty\nfrom kivy.loader import Loader\n\n\nclass Image(Widget):\n '''Image class, see module documentation for more information.\n '''\n\n source = StringProperty(None)\n '''Filename / source of your image.\n\n :data:`source` is a :class:`~kivy.properties.StringProperty`, default to\n None.\n '''\n\n texture = ObjectProperty(None, allownone=True)\n '''Texture object of the image.\n\n Depending of the texture creation, the value will be a\n :class:`~kivy.graphics.texture.Texture` or\n :class:`~kivy.graphics.texture.TextureRegion` object.\n\n :data:`texture` is a :class:`~kivy.properties.ObjectProperty`, default to\n None.\n '''\n\n texture_size = ListProperty([0, 0])\n '''Texture size of the image.\n\n .. warning::\n\n The texture size is set after the texture property. So if you listen on\n the change to :data:`texture`, the property texture_size will not be\n up-to-date. Use self.texture.size instead.\n '''\n\n def get_image_ratio(self):\n if self.texture:\n return self.texture.width / float(self.texture.height)\n return 1.\n\n mipmap = BooleanProperty(False)\n '''Indicate if you want OpenGL mipmapping to be applied on the texture.\n Read :ref:`mipmap` for more information.\n\n .. 
versionadded:: 1.0.7\n\n :data:`mipmap` is a :class:`~kivy.properties.BooleanProperty`, default to\n False.\n '''\n\n image_ratio = AliasProperty(get_image_ratio, None, bind=('texture', ))\n '''Ratio of the image (width / float(height).\n\n :data:`image_ratio` is a :class:`~kivy.properties.AliasProperty`, and is\n read-only.\n '''\n\n color = ListProperty([1, 1, 1, 1])\n '''Image color, in the format (r, g, b, a). This attribute can be used to\n 'tint' an image. Be careful, if the source image is not gray/white, the\n color will not really work as expected.\n\n .. versionadded:: 1.0.6\n\n :data:`color` is a :class:`~kivy.properties.ListProperty`, default to [1, 1,\n 1, 1].\n '''\n\n allow_stretch = BooleanProperty(False)\n '''If True, the normalized image size will be maximized to fit in the image\n box. Otherwise, if the box is too tall, the image will not be stretched more\n than 1:1 pixels.\n\n .. versionadded:: 1.0.7\n\n :data:`allow_stretch` is a :class:`~kivy.properties.BooleanProperty`,\n default to False\n '''\n\n keep_ratio = BooleanProperty(True)\n '''If False along with allow_stretch being True, the normalized image\n size will be maximized to fit in the image box, disregarding the aspect\n ratio of the image.\n Otherwise, if the box is too tall, the image will not be stretched more\n than 1:1 pixels.\n\n .. versionadded:: 1.0.8\n\n :data:`keep_ratio` is a :class:`~kivy.properties.BooleanProperty`,\n default to True\n '''\n\n keep_data = BooleanProperty(False)\n '''If true the underlaying _coreimage have to keep the raw image data.\n Useful to perform pixel based collision detection\n\n .. versionadded:: 1.3.0\n\n :data:`keep_data` is a :class:`~kivy.properties.BooleanProperty`, default\n to False\n '''\n\n anim_delay = NumericProperty(.25)\n '''Delay of animation if the image is sequenced (like an animated gif).\n If the anim_delay is set to -1, the animation will be stopped.\n\n .. versionadded:: 1.0.8\n\n :data:`anim_delay` is a :class:`~kivy.properties.NumericProperty`, default\n to .25 (4 FPS)\n '''\n\n def get_norm_image_size(self):\n if not self.texture:\n return self.size\n ratio = self.image_ratio\n w, h = self.size\n tw, th = self.texture.size\n\n # ensure that the width is always maximized to the containter width\n if self.allow_stretch:\n if not self.keep_ratio:\n return w, h\n iw = w\n else:\n iw = min(w, tw)\n # calculate the appropriate height\n ih = iw / ratio\n # if the height is too higher, take the height of the container\n # and calculate appropriate width. no need to test further. 
:)\n if ih > h:\n if self.allow_stretch:\n ih = h\n else:\n ih = min(h, th)\n iw = ih * ratio\n\n return iw, ih\n\n norm_image_size = AliasProperty(get_norm_image_size, None, bind=(\n 'texture', 'size', 'image_ratio', 'allow_stretch'))\n '''Normalized image size within the widget box.\n\n This size will always be fit to the widget size, and will preserve the image\n ratio.\n\n :data:`norm_image_size` is a :class:`~kivy.properties.AliasProperty`, and is\n read-only.\n '''\n\n def __init__(self, **kwargs):\n self._coreimage = None\n super(Image, self).__init__(**kwargs)\n self.bind(source=self.texture_update,\n mipmap=self.texture_update)\n if self.source:\n self.texture_update()\n\n def texture_update(self, *largs):\n if not self.source:\n self.texture = None\n else:\n filename = resource_find(self.source)\n if filename is None:\n return\n mipmap = self.mipmap\n if self._coreimage is not None:\n self._coreimage.unbind(on_texture=self._on_tex_change)\n self._coreimage = ci = CoreImage(filename, mipmap=mipmap,\n anim_delay=self.anim_delay, keep_data=self.keep_data)\n ci.bind(on_texture=self._on_tex_change)\n self.texture = ci.texture\n\n def on_anim_delay(self, instance, value):\n if self._coreimage is None:\n return\n self._coreimage.anim_delay = value\n if value < 0:\n self._coreimage.anim_reset(False)\n\n def on_texture(self, instance, value):\n if value is not None:\n self.texture_size = list(value.size)\n\n def _on_tex_change(self, *largs):\n # update texture from core image\n self.texture = self._coreimage.texture\n\n def reload(self):\n '''Reload image from disk. This facilitates re-loading of\n image from disk in case contents change.\n\n .. versionadded:: 1.3.0\n\n Usage::\n\n im = Image(source = '1.jpg')\n # -- do something --\n im.reload()\n # image will be re-loaded from disk\n\n '''\n self._coreimage.remove_from_cache()\n olsource = self.source\n self.source = ''\n self.source = olsource\n\n\nclass AsyncImage(Image):\n '''Asynchronous Image class. See module documentation for more information.\n '''\n\n def __init__(self, **kwargs):\n self._coreimage = None\n super(AsyncImage, self).__init__(**kwargs)\n self.unbind(source=self.texture_update,\n mipmap=self.texture_update)\n\n def on_source(self, instance, value):\n if not value:\n if self._coreimage is not None:\n self._coreimage.unbind(on_texture=self._on_tex_change)\n self.texture = None\n self._coreimage = None\n else:\n if not self.is_uri(value):\n value = resource_find(value)\n self._coreimage = image = Loader.image(value)\n image.bind(on_load=self.on_source_load)\n image.bind(on_texture=self._on_tex_change)\n self.texture = image.texture\n\n def on_source_load(self, value):\n image = self._coreimage.image\n if not image:\n return\n self.texture = image.texture\n\n def is_uri(self, filename):\n proto = filename.split('://', 1)[0]\n return proto in ('http', 'https', 'ftp', 'smb')\n\n def _on_tex_change(self, *largs):\n if self._coreimage:\n self.texture = self._coreimage.texture\n", "path": "kivy/uix/image.py"}]}
| 3,544 | 182 |
gh_patches_debug_17073
|
rasdani/github-patches
|
git_diff
|
obspy__obspy-1548
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
clients.neic tests fail
Potentially a server side change?
``` bash
$ obspy.runtests clients.neic
Running /Users/lion/workspace/code/obspy/obspy/scripts/runtests.py, ObsPy version '1.0.2.post0+358.ged0c7fb007'
....F
======================================================================
FAIL: test_get_waveform_nscl (obspy.clients.neic.tests.test_client.ClientTestCase)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/lion/workspace/code/obspy/obspy/clients/neic/tests/test_client.py", line 93, in test_get_waveform_nscl
self.assertEqual(st, st2)
AssertionError: <obspy.core.stream.Stream object at 0x10d8276a0> != <obspy.core.stream.Stream object at 0x10cf5a3c8>
----------------------------------------------------------------------
Ran 5 tests in 8.443s
FAILED (failures=1)
```
</issue>
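The golden patch later in this record does not touch `clients.neic` at all; it relaxes hard-coded timestamps in the `clients.syngine` doctests, relying on the `# doctest: +ELLIPSIS` directive already present in the docstring so that `...` can absorb the fractional seconds. A minimal, standalone illustration of that directive (plain standard-library `doctest`, nothing ObsPy-specific):
```python
import doctest

snippet = '''
>>> from datetime import datetime
>>> datetime(2010, 2, 27, 6, 48, 11, 500000)  # doctest: +ELLIPSIS
datetime.datetime(2010, 2, 27, 6, 48, 11...)
'''

# The "..." in the expected output absorbs the microseconds, so the
# comparison does not depend on them.
test = doctest.DocTestParser().get_doctest(snippet, {}, "ellipsis-demo", None, 0)
print(doctest.DocTestRunner().run(test))  # TestResults(failed=0, attempted=2)
```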
<code>
[start of obspy/clients/syngine/__init__.py]
1 # -*- coding: utf-8 -*-
2 """
3 obspy.clients.syngine - Client for the IRIS Syngine service
4 ===========================================================
5
6 This module offers methods to download from the IRIS syngine service
7 (https://ds.iris.edu/ds/products/syngine/). The service is able to generate
8 fully three dimensional synthetics through various 1D Earth models with
9 arbitrary source-receiver geometries and source mechanisms.
10
11 :copyright:
12 The ObsPy Development Team ([email protected])
13 :license:
14 GNU Lesser General Public License, Version 3
15 (https://www.gnu.org/copyleft/lesser.html)
16
17
18 Basic Usage
19 -----------
20
21 First initialize a client object.
22
23 >>> from obspy.clients.syngine import Client
24 >>> client = Client()
25
26 Then request some data.
27
28 >>> st = client.get_waveforms(model="ak135f_5s", network="IU", station="ANMO",
29 ... eventid="GCMT:C201002270634A")
30 >>> print(st) # doctest: +ELLIPSIS
31 3 Trace(s) in Stream:
32 IU.ANMO.SE.MXZ | 2010-02-27T06:35:14... - ... | 4.0 Hz, 15520 samples
33 IU.ANMO.SE.MXN | 2010-02-27T06:35:14... - ... | 4.0 Hz, 15520 samples
34 IU.ANMO.SE.MXE | 2010-02-27T06:35:14... - ... | 4.0 Hz, 15520 samples
35 >>> st.plot() # doctest: +SKIP
36
37 .. plot::
38
39 from obspy.clients.syngine import Client
40 client = Client()
41 st = client.get_waveforms(model="ak135f_5s", network="IU", station="ANMO",
42 eventid="GCMT:C201002270634A")
43 st.plot()
44
45 The available parameters are explained in detail in the
46 :meth:`~obspy.clients.syngine.client.Client.get_waveforms()` method and on
47 the `Syngine <https://ds.iris.edu/ds/products/syngine/>`_ website.
48
49
50 The queries are quite flexible. The following uses a station name wildcard
51 and only requests data around the P arrival. Please be a bit careful as one
52 can potentially download a lot of data with a single request.
53
54 >>> st = client.get_waveforms(model="ak135f_5s", network="IU", station="AN*",
55 ... eventid="GCMT:C201002270634A",
56 ... starttime="P-10", endtime="P+20")
57 >>> st.plot() # doctest: +SKIP
58
59 .. plot::
60
61 from obspy.clients.syngine import Client
62 client = Client()
63 st = client.get_waveforms(model="ak135f_5s", network="IU", station="A*",
64 eventid="GCMT:C201002270634A",
65 starttime="P-10", endtime="P+20")
66 st.plot()
67
68
69 Bulk Requests
70 -------------
71
72 It is also possible to send requests for multiple user-defined stations. See
73 the :meth:`~obspy.clients.syngine.client.Client.get_waveforms_bulk()` method
74 for details. This example specifies a bunch of receiver coordinates and
75 requests seismograms for all of these.
76
77 >>> bulk = [{"latitude": 10.1, "longitude": 12.2, "stationcode": "AA"},
78 ... {"latitude": 14.5, "longitude": 10.0, "stationcode": "BB"}]
79 >>> st = client.get_waveforms_bulk(
80 ... model="ak135f_5s", eventid="GCMT:C201002270634A",
81 ... bulk=bulk, starttime="P-10", endtime="P+20")
82 >>> print(st) # doctest: +ELLIPSIS
83 6 Trace(s) in Stream:
84 XX.AA.SE.MXZ | 2010-02-27T06:48:11.500000Z - ... | 4.0 Hz, 120 samples
85 XX.AA.SE.MXN | 2010-02-27T06:48:11.500000Z - ... | 4.0 Hz, 120 samples
86 XX.AA.SE.MXE | 2010-02-27T06:48:11.500000Z - ... | 4.0 Hz, 120 samples
87 XX.BB.SE.MXZ | 2010-02-27T06:48:15.250000Z - ... | 4.0 Hz, 120 samples
88 XX.BB.SE.MXN | 2010-02-27T06:48:15.250000Z - ... | 4.0 Hz, 120 samples
89 XX.BB.SE.MXE | 2010-02-27T06:48:15.250000Z - ... | 4.0 Hz, 120 samples
90
91
92 Other Useful Methods
93 --------------------
94
95 Use the :meth:`~obspy.clients.syngine.client.Client.get_available_models()`
96 method for a list of all available methods including some meta-information.
97
98 >>> client.get_available_models() # doctest: +SKIP
99 {'ak135f_1s': {'components': 'vertical only',
100 'default_components': 'Z',
101 'default_dt': '0.05',
102 'description': 'ak135 with density & Q of Montagner & Kennet(1996)',
103 'length': 1815.00690721715,
104 'max_event_depth': 750000,
105 'max_period': '~100',
106 'max_sampling_period': '0.255383',
107 'min_period': 1.04999995231628},
108 'ak135f_2s': ...
109 ...
110 }
111
112 The :func:`~obspy.clients.syngine.client.Client.get_model_info()`
113 method should be used to retrieve information about a specific model.
114
115 >>> from obspy.clients.syngine import Client
116 >>> c = Client()
117 >>> db_info = c.get_model_info(model_name="ak135f_5s")
118 >>> print(db_info.period)
119 5.125
120 """
121 from __future__ import (absolute_import, division, print_function,
122 unicode_literals)
123 from future.builtins import * # NOQA
124 from future.utils import native_str
125
126 from .client import Client # NOQA
127
128 __all__ = [native_str("Client")]
129
130
131 if __name__ == '__main__':
132 import doctest
133 doctest.testmod(exclude_empty=True)
134
[end of obspy/clients/syngine/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/obspy/clients/syngine/__init__.py b/obspy/clients/syngine/__init__.py
--- a/obspy/clients/syngine/__init__.py
+++ b/obspy/clients/syngine/__init__.py
@@ -81,12 +81,12 @@
... bulk=bulk, starttime="P-10", endtime="P+20")
>>> print(st) # doctest: +ELLIPSIS
6 Trace(s) in Stream:
-XX.AA.SE.MXZ | 2010-02-27T06:48:11.500000Z - ... | 4.0 Hz, 120 samples
-XX.AA.SE.MXN | 2010-02-27T06:48:11.500000Z - ... | 4.0 Hz, 120 samples
-XX.AA.SE.MXE | 2010-02-27T06:48:11.500000Z - ... | 4.0 Hz, 120 samples
-XX.BB.SE.MXZ | 2010-02-27T06:48:15.250000Z - ... | 4.0 Hz, 120 samples
-XX.BB.SE.MXN | 2010-02-27T06:48:15.250000Z - ... | 4.0 Hz, 120 samples
-XX.BB.SE.MXE | 2010-02-27T06:48:15.250000Z - ... | 4.0 Hz, 120 samples
+XX.AA.SE.MXZ | 2010-02-27T06:48:11... - ... | 4.0 Hz, 120 samples
+XX.AA.SE.MXN | 2010-02-27T06:48:11... - ... | 4.0 Hz, 120 samples
+XX.AA.SE.MXE | 2010-02-27T06:48:11... - ... | 4.0 Hz, 120 samples
+XX.BB.SE.MXZ | 2010-02-27T06:48:15... - ... | 4.0 Hz, 120 samples
+XX.BB.SE.MXN | 2010-02-27T06:48:15... - ... | 4.0 Hz, 120 samples
+XX.BB.SE.MXE | 2010-02-27T06:48:15... - ... | 4.0 Hz, 120 samples
Other Useful Methods
|
{"golden_diff": "diff --git a/obspy/clients/syngine/__init__.py b/obspy/clients/syngine/__init__.py\n--- a/obspy/clients/syngine/__init__.py\n+++ b/obspy/clients/syngine/__init__.py\n@@ -81,12 +81,12 @@\n ... bulk=bulk, starttime=\"P-10\", endtime=\"P+20\")\n >>> print(st) # doctest: +ELLIPSIS\n 6 Trace(s) in Stream:\n-XX.AA.SE.MXZ | 2010-02-27T06:48:11.500000Z - ... | 4.0 Hz, 120 samples\n-XX.AA.SE.MXN | 2010-02-27T06:48:11.500000Z - ... | 4.0 Hz, 120 samples\n-XX.AA.SE.MXE | 2010-02-27T06:48:11.500000Z - ... | 4.0 Hz, 120 samples\n-XX.BB.SE.MXZ | 2010-02-27T06:48:15.250000Z - ... | 4.0 Hz, 120 samples\n-XX.BB.SE.MXN | 2010-02-27T06:48:15.250000Z - ... | 4.0 Hz, 120 samples\n-XX.BB.SE.MXE | 2010-02-27T06:48:15.250000Z - ... | 4.0 Hz, 120 samples\n+XX.AA.SE.MXZ | 2010-02-27T06:48:11... - ... | 4.0 Hz, 120 samples\n+XX.AA.SE.MXN | 2010-02-27T06:48:11... - ... | 4.0 Hz, 120 samples\n+XX.AA.SE.MXE | 2010-02-27T06:48:11... - ... | 4.0 Hz, 120 samples\n+XX.BB.SE.MXZ | 2010-02-27T06:48:15... - ... | 4.0 Hz, 120 samples\n+XX.BB.SE.MXN | 2010-02-27T06:48:15... - ... | 4.0 Hz, 120 samples\n+XX.BB.SE.MXE | 2010-02-27T06:48:15... - ... | 4.0 Hz, 120 samples\n \n \n Other Useful Methods\n", "issue": "clients.neic tests fail\nPotentially a server side change?\n\n``` bash\n$ obspy.runtests clients.neic\n\nRunning /Users/lion/workspace/code/obspy/obspy/scripts/runtests.py, ObsPy version '1.0.2.post0+358.ged0c7fb007'\n....F\n\n======================================================================\nFAIL: test_get_waveform_nscl (obspy.clients.neic.tests.test_client.ClientTestCase)\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"/Users/lion/workspace/code/obspy/obspy/clients/neic/tests/test_client.py\", line 93, in test_get_waveform_nscl\n self.assertEqual(st, st2)\nAssertionError: <obspy.core.stream.Stream object at 0x10d8276a0> != <obspy.core.stream.Stream object at 0x10cf5a3c8>\n\n----------------------------------------------------------------------\nRan 5 tests in 8.443s\n\nFAILED (failures=1)\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nobspy.clients.syngine - Client for the IRIS Syngine service\n===========================================================\n\nThis module offers methods to download from the IRIS syngine service\n(https://ds.iris.edu/ds/products/syngine/). The service is able to generate\nfully three dimensional synthetics through various 1D Earth models with\narbitrary source-receiver geometries and source mechanisms.\n\n:copyright:\n The ObsPy Development Team ([email protected])\n:license:\n GNU Lesser General Public License, Version 3\n (https://www.gnu.org/copyleft/lesser.html)\n\n\nBasic Usage\n-----------\n\nFirst initialize a client object.\n\n>>> from obspy.clients.syngine import Client\n>>> client = Client()\n\nThen request some data.\n\n>>> st = client.get_waveforms(model=\"ak135f_5s\", network=\"IU\", station=\"ANMO\",\n... eventid=\"GCMT:C201002270634A\")\n>>> print(st) # doctest: +ELLIPSIS\n3 Trace(s) in Stream:\nIU.ANMO.SE.MXZ | 2010-02-27T06:35:14... - ... | 4.0 Hz, 15520 samples\nIU.ANMO.SE.MXN | 2010-02-27T06:35:14... - ... | 4.0 Hz, 15520 samples\nIU.ANMO.SE.MXE | 2010-02-27T06:35:14... - ... | 4.0 Hz, 15520 samples\n>>> st.plot() # doctest: +SKIP\n\n.. 
plot::\n\n from obspy.clients.syngine import Client\n client = Client()\n st = client.get_waveforms(model=\"ak135f_5s\", network=\"IU\", station=\"ANMO\",\n eventid=\"GCMT:C201002270634A\")\n st.plot()\n\nThe available parameters are explained in detail in the\n:meth:`~obspy.clients.syngine.client.Client.get_waveforms()` method and on\nthe `Syngine <https://ds.iris.edu/ds/products/syngine/>`_ website.\n\n\nThe queries are quite flexible. The following uses a station name wildcard\nand only requests data around the P arrival. Please be a bit careful as one\ncan potentially download a lot of data with a single request.\n\n>>> st = client.get_waveforms(model=\"ak135f_5s\", network=\"IU\", station=\"AN*\",\n... eventid=\"GCMT:C201002270634A\",\n... starttime=\"P-10\", endtime=\"P+20\")\n>>> st.plot() # doctest: +SKIP\n\n.. plot::\n\n from obspy.clients.syngine import Client\n client = Client()\n st = client.get_waveforms(model=\"ak135f_5s\", network=\"IU\", station=\"A*\",\n eventid=\"GCMT:C201002270634A\",\n starttime=\"P-10\", endtime=\"P+20\")\n st.plot()\n\n\nBulk Requests\n-------------\n\nIt is also possible to send requests for multiple user-defined stations. See\nthe :meth:`~obspy.clients.syngine.client.Client.get_waveforms_bulk()` method\nfor details. This example specifies a bunch of receiver coordinates and\nrequests seismograms for all of these.\n\n>>> bulk = [{\"latitude\": 10.1, \"longitude\": 12.2, \"stationcode\": \"AA\"},\n... {\"latitude\": 14.5, \"longitude\": 10.0, \"stationcode\": \"BB\"}]\n>>> st = client.get_waveforms_bulk(\n... model=\"ak135f_5s\", eventid=\"GCMT:C201002270634A\",\n... bulk=bulk, starttime=\"P-10\", endtime=\"P+20\")\n>>> print(st) # doctest: +ELLIPSIS\n6 Trace(s) in Stream:\nXX.AA.SE.MXZ | 2010-02-27T06:48:11.500000Z - ... | 4.0 Hz, 120 samples\nXX.AA.SE.MXN | 2010-02-27T06:48:11.500000Z - ... | 4.0 Hz, 120 samples\nXX.AA.SE.MXE | 2010-02-27T06:48:11.500000Z - ... | 4.0 Hz, 120 samples\nXX.BB.SE.MXZ | 2010-02-27T06:48:15.250000Z - ... | 4.0 Hz, 120 samples\nXX.BB.SE.MXN | 2010-02-27T06:48:15.250000Z - ... | 4.0 Hz, 120 samples\nXX.BB.SE.MXE | 2010-02-27T06:48:15.250000Z - ... | 4.0 Hz, 120 samples\n\n\nOther Useful Methods\n--------------------\n\nUse the :meth:`~obspy.clients.syngine.client.Client.get_available_models()`\nmethod for a list of all available methods including some meta-information.\n\n>>> client.get_available_models() # doctest: +SKIP\n{'ak135f_1s': {'components': 'vertical only',\n 'default_components': 'Z',\n 'default_dt': '0.05',\n 'description': 'ak135 with density & Q of Montagner & Kennet(1996)',\n 'length': 1815.00690721715,\n 'max_event_depth': 750000,\n 'max_period': '~100',\n 'max_sampling_period': '0.255383',\n 'min_period': 1.04999995231628},\n 'ak135f_2s': ...\n ...\n }\n\nThe :func:`~obspy.clients.syngine.client.Client.get_model_info()`\nmethod should be used to retrieve information about a specific model.\n\n>>> from obspy.clients.syngine import Client\n>>> c = Client()\n>>> db_info = c.get_model_info(model_name=\"ak135f_5s\")\n>>> print(db_info.period)\n5.125\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import * # NOQA\nfrom future.utils import native_str\n\nfrom .client import Client # NOQA\n\n__all__ = [native_str(\"Client\")]\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod(exclude_empty=True)\n", "path": "obspy/clients/syngine/__init__.py"}]}
| 2,671 | 700 |
gh_patches_debug_41908
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-2045
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Form field validation: update copy to be more specific on the problem


## Acceptance Criteria
- [x] Configure the browser validation message - https://developer.mozilla.org/en-US/docs/Web/HTML/Constraint_validation#controlling_the_text_of_constraint_violation
- [x] Test with different browser locales
- [x] Add custom copy for both Eligibility Index (radio buttons) and Eligibility Confirm (text fields)
</issue>
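The acceptance criteria above build on the HTML constraint-validation API: the browser displays a field's `validationMessage`, and a page script can override that text with `setCustomValidity()`. The golden diff further down in this record feeds the custom copy to the front end by attaching a `data-custom-validity` attribute to each widget. A rough sketch of that server-side half only (illustrative, using a plain Django widget rather than the project's own form classes):
```python
from django import forms
from django.utils.translation import gettext_lazy as _

# Attach the message the browser should show to the rendered <input>;
# a page script can read the attribute and pass it to setCustomValidity().
widget = forms.TextInput()
widget.attrs.update({"data-custom-validity": _("Please enter a 5-digit number.")})

field = forms.CharField(label=_("Courtesy Card number"), widget=widget)
```
The JavaScript half is not shown in this record, so the wiring described above is an assumption about how such an attribute would typically be consumed.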
<code>
[start of benefits/eligibility/forms.py]
1 """
2 The eligibility application: Form definition for the eligibility verification flow.
3 """
4
5 import logging
6
7 from django import forms
8 from django.utils.translation import gettext_lazy as _
9
10 from benefits.core import models, recaptcha, widgets
11
12 logger = logging.getLogger(__name__)
13
14
15 class EligibilityVerifierSelectionForm(forms.Form):
16 """Form to capture eligibility verifier selection."""
17
18 action_url = "eligibility:index"
19 id = "form-verifier-selection"
20 method = "POST"
21
22 verifier = forms.ChoiceField(label="", widget=widgets.VerifierRadioSelect)
23 # sets label to empty string so the radio_select template can override the label style
24 submit_value = _("Choose this benefit")
25
26 def __init__(self, agency: models.TransitAgency, *args, **kwargs):
27 super().__init__(*args, **kwargs)
28 verifiers = agency.active_verifiers
29
30 self.classes = "col-lg-8"
31 # second element is not used since we render the whole label using selection_label_template,
32 # therefore set to None
33 self.fields["verifier"].choices = [(v.id, None) for v in verifiers]
34 self.fields["verifier"].widget.selection_label_templates = {v.id: v.selection_label_template for v in verifiers}
35
36 def clean(self):
37 if not recaptcha.verify(self.data):
38 raise forms.ValidationError("reCAPTCHA failed")
39
40
41 class EligibilityVerificationForm(forms.Form):
42 """Form to collect eligibility verification details."""
43
44 action_url = "eligibility:confirm"
45 id = "form-eligibility-verification"
46 method = "POST"
47
48 submit_value = _("Find my record")
49 submitting_value = _("Checking")
50
51 _error_messages = {
52 "invalid": _("Check your input. The format looks wrong."),
53 "missing": _("This field is required."),
54 }
55
56 def __init__(
57 self,
58 title,
59 headline,
60 blurb,
61 name_label,
62 name_placeholder,
63 name_help_text,
64 sub_label,
65 sub_placeholder,
66 sub_help_text,
67 name_max_length=None,
68 sub_input_mode=None,
69 sub_max_length=None,
70 sub_pattern=None,
71 *args,
72 **kwargs,
73 ):
74 """Initialize a new EligibilityVerifier form.
75
76 Args:
77 title (str): The page (i.e. tab) title for the form's page.
78
79 headline (str): The <h1> on the form's page.
80
81 blurb (str): Intro <p> on the form's page.
82
83 name_label (str): Label for the name form field.
84
85 name_placeholder (str): Field placeholder for the name form field.
86
87 name_help_text (str): Extra help text for the name form field.
88
89 sub_label (str): Label for the sub form field.
90
91 sub_placeholder (str): Field placeholder for the sub form field.
92
93 sub_help_text (str): Extra help text for the sub form field.
94
95 name_max_length (int): The maximum length accepted for the 'name' API field before sending to this verifier
96
97 sub_input_mode (str): Input mode can be "numeric", "tel", "search", etc. to override default "text" keyboard on
98 mobile devices
99
100 sub_max_length (int): The maximum length accepted for the 'sub' API field before sending to this verifier
101
102 sub_pattern (str): A regular expression used to validate the 'sub' API field before sending to this verifier
103
104 Extra args and kwargs are passed through to the underlying django.forms.Form.
105 """
106 super().__init__(auto_id=True, label_suffix="", *args, **kwargs)
107
108 self.title = title
109 self.headline = headline
110 self.blurb = blurb
111
112 self.classes = "col-lg-6"
113 sub_widget = widgets.FormControlTextInput(placeholder=sub_placeholder)
114 if sub_pattern:
115 sub_widget.attrs.update({"pattern": sub_pattern})
116 if sub_input_mode:
117 sub_widget.attrs.update({"inputmode": sub_input_mode})
118 if sub_max_length:
119 sub_widget.attrs.update({"maxlength": sub_max_length})
120
121 self.fields["sub"] = forms.CharField(
122 label=sub_label,
123 widget=sub_widget,
124 help_text=sub_help_text,
125 )
126
127 name_widget = widgets.FormControlTextInput(placeholder=name_placeholder)
128 if name_max_length:
129 name_widget.attrs.update({"maxlength": name_max_length})
130
131 self.fields["name"] = forms.CharField(label=name_label, widget=name_widget, help_text=name_help_text)
132
133 def clean(self):
134 if not recaptcha.verify(self.data):
135 raise forms.ValidationError("reCAPTCHA failed")
136
137
138 class MSTCourtesyCard(EligibilityVerificationForm):
139 """EligibilityVerification form for the MST Courtesy Card."""
140
141 def __init__(self, *args, **kwargs):
142 super().__init__(
143 title=_("Agency card information"),
144 headline=_("Let’s find the record of your transit benefit."),
145 blurb=_(
146 "We use the information on your MST Courtesy Card to find the record of your transit benefit in our system."
147 ),
148 name_label=_("Last Name"),
149 name_placeholder="Garcia",
150 name_help_text=_(
151 "Please enter your last name the same way it is printed on your card, including capital letters and hyphens."
152 ),
153 sub_label=_("Courtesy Card number"),
154 sub_help_text=_("This is a 5-digit number on the front and back of your card."),
155 sub_placeholder="12345",
156 name_max_length=255,
157 sub_input_mode="numeric",
158 sub_max_length=5,
159 sub_pattern=r"\d{5}",
160 *args,
161 **kwargs,
162 )
163
164
165 class SBMTDMobilityPass(EligibilityVerificationForm):
166 """EligibilityVerification form for the SBMTD Reduced Fare Mobility ID."""
167
168 def __init__(self, *args, **kwargs):
169 super().__init__(
170 title=_("Agency card information"),
171 headline=_("Let’s find the record of your transit benefit."),
172 blurb=_(
173 "We use the information on your SBMTD Reduced Fare Mobility ID card to find the record of your transit "
174 + "benefit in our system."
175 ),
176 name_label=_("Last Name"),
177 name_placeholder="Garcia",
178 name_help_text=_(
179 "Please enter your last name the same way it is printed on your card, including capital letters and hyphens."
180 ),
181 sub_label=_("Reduced Fare Mobility ID card number"),
182 sub_help_text=_("This is a 4-digit number on the back of your card."),
183 sub_placeholder="1234",
184 name_max_length=255,
185 sub_input_mode="numeric",
186 sub_max_length=4,
187 sub_pattern=r"\d{4}",
188 *args,
189 **kwargs,
190 )
191
[end of benefits/eligibility/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/benefits/eligibility/forms.py b/benefits/eligibility/forms.py
--- a/benefits/eligibility/forms.py
+++ b/benefits/eligibility/forms.py
@@ -30,8 +30,11 @@
self.classes = "col-lg-8"
# second element is not used since we render the whole label using selection_label_template,
# therefore set to None
- self.fields["verifier"].choices = [(v.id, None) for v in verifiers]
- self.fields["verifier"].widget.selection_label_templates = {v.id: v.selection_label_template for v in verifiers}
+ verifier_field = self.fields["verifier"]
+ verifier_field.choices = [(v.id, None) for v in verifiers]
+ verifier_field.widget.selection_label_templates = {v.id: v.selection_label_template for v in verifiers}
+ verifier_field.widget.attrs.update({"data-custom-validity": _("Please choose a transit benefit.")})
+ self.use_custom_validity = True
def clean(self):
if not recaptcha.verify(self.data):
@@ -48,11 +51,6 @@
submit_value = _("Find my record")
submitting_value = _("Checking")
- _error_messages = {
- "invalid": _("Check your input. The format looks wrong."),
- "missing": _("This field is required."),
- }
-
def __init__(
self,
title,
@@ -68,6 +66,8 @@
sub_input_mode=None,
sub_max_length=None,
sub_pattern=None,
+ sub_custom_validity=None,
+ name_custom_validity=None,
*args,
**kwargs,
):
@@ -117,6 +117,9 @@
sub_widget.attrs.update({"inputmode": sub_input_mode})
if sub_max_length:
sub_widget.attrs.update({"maxlength": sub_max_length})
+ if sub_custom_validity:
+ sub_widget.attrs.update({"data-custom-validity": sub_custom_validity})
+ self.use_custom_validity = True
self.fields["sub"] = forms.CharField(
label=sub_label,
@@ -127,6 +130,9 @@
name_widget = widgets.FormControlTextInput(placeholder=name_placeholder)
if name_max_length:
name_widget.attrs.update({"maxlength": name_max_length})
+ if name_custom_validity:
+ name_widget.attrs.update({"data-custom-validity": name_custom_validity})
+ self.use_custom_validity = True
self.fields["name"] = forms.CharField(label=name_label, widget=name_widget, help_text=name_help_text)
@@ -157,6 +163,8 @@
sub_input_mode="numeric",
sub_max_length=5,
sub_pattern=r"\d{5}",
+ sub_custom_validity=_("Please enter a 5-digit number."),
+ name_custom_validity=_("Please enter your last name."),
*args,
**kwargs,
)
@@ -185,6 +193,8 @@
sub_input_mode="numeric",
sub_max_length=4,
sub_pattern=r"\d{4}",
+ sub_custom_validity=_("Please enter a 4-digit number."),
+ name_custom_validity=_("Please enter your last name."),
*args,
**kwargs,
)
|
{"golden_diff": "diff --git a/benefits/eligibility/forms.py b/benefits/eligibility/forms.py\n--- a/benefits/eligibility/forms.py\n+++ b/benefits/eligibility/forms.py\n@@ -30,8 +30,11 @@\n self.classes = \"col-lg-8\"\n # second element is not used since we render the whole label using selection_label_template,\n # therefore set to None\n- self.fields[\"verifier\"].choices = [(v.id, None) for v in verifiers]\n- self.fields[\"verifier\"].widget.selection_label_templates = {v.id: v.selection_label_template for v in verifiers}\n+ verifier_field = self.fields[\"verifier\"]\n+ verifier_field.choices = [(v.id, None) for v in verifiers]\n+ verifier_field.widget.selection_label_templates = {v.id: v.selection_label_template for v in verifiers}\n+ verifier_field.widget.attrs.update({\"data-custom-validity\": _(\"Please choose a transit benefit.\")})\n+ self.use_custom_validity = True\n \n def clean(self):\n if not recaptcha.verify(self.data):\n@@ -48,11 +51,6 @@\n submit_value = _(\"Find my record\")\n submitting_value = _(\"Checking\")\n \n- _error_messages = {\n- \"invalid\": _(\"Check your input. The format looks wrong.\"),\n- \"missing\": _(\"This field is required.\"),\n- }\n-\n def __init__(\n self,\n title,\n@@ -68,6 +66,8 @@\n sub_input_mode=None,\n sub_max_length=None,\n sub_pattern=None,\n+ sub_custom_validity=None,\n+ name_custom_validity=None,\n *args,\n **kwargs,\n ):\n@@ -117,6 +117,9 @@\n sub_widget.attrs.update({\"inputmode\": sub_input_mode})\n if sub_max_length:\n sub_widget.attrs.update({\"maxlength\": sub_max_length})\n+ if sub_custom_validity:\n+ sub_widget.attrs.update({\"data-custom-validity\": sub_custom_validity})\n+ self.use_custom_validity = True\n \n self.fields[\"sub\"] = forms.CharField(\n label=sub_label,\n@@ -127,6 +130,9 @@\n name_widget = widgets.FormControlTextInput(placeholder=name_placeholder)\n if name_max_length:\n name_widget.attrs.update({\"maxlength\": name_max_length})\n+ if name_custom_validity:\n+ name_widget.attrs.update({\"data-custom-validity\": name_custom_validity})\n+ self.use_custom_validity = True\n \n self.fields[\"name\"] = forms.CharField(label=name_label, widget=name_widget, help_text=name_help_text)\n \n@@ -157,6 +163,8 @@\n sub_input_mode=\"numeric\",\n sub_max_length=5,\n sub_pattern=r\"\\d{5}\",\n+ sub_custom_validity=_(\"Please enter a 5-digit number.\"),\n+ name_custom_validity=_(\"Please enter your last name.\"),\n *args,\n **kwargs,\n )\n@@ -185,6 +193,8 @@\n sub_input_mode=\"numeric\",\n sub_max_length=4,\n sub_pattern=r\"\\d{4}\",\n+ sub_custom_validity=_(\"Please enter a 4-digit number.\"),\n+ name_custom_validity=_(\"Please enter your last name.\"),\n *args,\n **kwargs,\n )\n", "issue": "Form field validation: update copy to be more specific on the problem\n\r\n\r\n\r\n\r\n\r\n\r\n## Acceptance Criteria\r\n\r\n- [x] Configure the browser validation message - https://developer.mozilla.org/en-US/docs/Web/HTML/Constraint_validation#controlling_the_text_of_constraint_violation\r\n- [x] Test with different browser locales\r\n- [x] Add custom copy for both Eligibility Index (radio buttons) and Eligibility Confirm (text fields)\r\n\n", "before_files": [{"content": "\"\"\"\nThe eligibility application: Form definition for the eligibility verification flow.\n\"\"\"\n\nimport logging\n\nfrom django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom benefits.core import models, recaptcha, widgets\n\nlogger = logging.getLogger(__name__)\n\n\nclass EligibilityVerifierSelectionForm(forms.Form):\n \"\"\"Form to capture 
eligibility verifier selection.\"\"\"\n\n action_url = \"eligibility:index\"\n id = \"form-verifier-selection\"\n method = \"POST\"\n\n verifier = forms.ChoiceField(label=\"\", widget=widgets.VerifierRadioSelect)\n # sets label to empty string so the radio_select template can override the label style\n submit_value = _(\"Choose this benefit\")\n\n def __init__(self, agency: models.TransitAgency, *args, **kwargs):\n super().__init__(*args, **kwargs)\n verifiers = agency.active_verifiers\n\n self.classes = \"col-lg-8\"\n # second element is not used since we render the whole label using selection_label_template,\n # therefore set to None\n self.fields[\"verifier\"].choices = [(v.id, None) for v in verifiers]\n self.fields[\"verifier\"].widget.selection_label_templates = {v.id: v.selection_label_template for v in verifiers}\n\n def clean(self):\n if not recaptcha.verify(self.data):\n raise forms.ValidationError(\"reCAPTCHA failed\")\n\n\nclass EligibilityVerificationForm(forms.Form):\n \"\"\"Form to collect eligibility verification details.\"\"\"\n\n action_url = \"eligibility:confirm\"\n id = \"form-eligibility-verification\"\n method = \"POST\"\n\n submit_value = _(\"Find my record\")\n submitting_value = _(\"Checking\")\n\n _error_messages = {\n \"invalid\": _(\"Check your input. The format looks wrong.\"),\n \"missing\": _(\"This field is required.\"),\n }\n\n def __init__(\n self,\n title,\n headline,\n blurb,\n name_label,\n name_placeholder,\n name_help_text,\n sub_label,\n sub_placeholder,\n sub_help_text,\n name_max_length=None,\n sub_input_mode=None,\n sub_max_length=None,\n sub_pattern=None,\n *args,\n **kwargs,\n ):\n \"\"\"Initialize a new EligibilityVerifier form.\n\n Args:\n title (str): The page (i.e. tab) title for the form's page.\n\n headline (str): The <h1> on the form's page.\n\n blurb (str): Intro <p> on the form's page.\n\n name_label (str): Label for the name form field.\n\n name_placeholder (str): Field placeholder for the name form field.\n\n name_help_text (str): Extra help text for the name form field.\n\n sub_label (str): Label for the sub form field.\n\n sub_placeholder (str): Field placeholder for the sub form field.\n\n sub_help_text (str): Extra help text for the sub form field.\n\n name_max_length (int): The maximum length accepted for the 'name' API field before sending to this verifier\n\n sub_input_mode (str): Input mode can be \"numeric\", \"tel\", \"search\", etc. 
to override default \"text\" keyboard on\n mobile devices\n\n sub_max_length (int): The maximum length accepted for the 'sub' API field before sending to this verifier\n\n sub_pattern (str): A regular expression used to validate the 'sub' API field before sending to this verifier\n\n Extra args and kwargs are passed through to the underlying django.forms.Form.\n \"\"\"\n super().__init__(auto_id=True, label_suffix=\"\", *args, **kwargs)\n\n self.title = title\n self.headline = headline\n self.blurb = blurb\n\n self.classes = \"col-lg-6\"\n sub_widget = widgets.FormControlTextInput(placeholder=sub_placeholder)\n if sub_pattern:\n sub_widget.attrs.update({\"pattern\": sub_pattern})\n if sub_input_mode:\n sub_widget.attrs.update({\"inputmode\": sub_input_mode})\n if sub_max_length:\n sub_widget.attrs.update({\"maxlength\": sub_max_length})\n\n self.fields[\"sub\"] = forms.CharField(\n label=sub_label,\n widget=sub_widget,\n help_text=sub_help_text,\n )\n\n name_widget = widgets.FormControlTextInput(placeholder=name_placeholder)\n if name_max_length:\n name_widget.attrs.update({\"maxlength\": name_max_length})\n\n self.fields[\"name\"] = forms.CharField(label=name_label, widget=name_widget, help_text=name_help_text)\n\n def clean(self):\n if not recaptcha.verify(self.data):\n raise forms.ValidationError(\"reCAPTCHA failed\")\n\n\nclass MSTCourtesyCard(EligibilityVerificationForm):\n \"\"\"EligibilityVerification form for the MST Courtesy Card.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(\n title=_(\"Agency card information\"),\n headline=_(\"Let\u2019s find the record of your transit benefit.\"),\n blurb=_(\n \"We use the information on your MST Courtesy Card to find the record of your transit benefit in our system.\"\n ),\n name_label=_(\"Last Name\"),\n name_placeholder=\"Garcia\",\n name_help_text=_(\n \"Please enter your last name the same way it is printed on your card, including capital letters and hyphens.\"\n ),\n sub_label=_(\"Courtesy Card number\"),\n sub_help_text=_(\"This is a 5-digit number on the front and back of your card.\"),\n sub_placeholder=\"12345\",\n name_max_length=255,\n sub_input_mode=\"numeric\",\n sub_max_length=5,\n sub_pattern=r\"\\d{5}\",\n *args,\n **kwargs,\n )\n\n\nclass SBMTDMobilityPass(EligibilityVerificationForm):\n \"\"\"EligibilityVerification form for the SBMTD Reduced Fare Mobility ID.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(\n title=_(\"Agency card information\"),\n headline=_(\"Let\u2019s find the record of your transit benefit.\"),\n blurb=_(\n \"We use the information on your SBMTD Reduced Fare Mobility ID card to find the record of your transit \"\n + \"benefit in our system.\"\n ),\n name_label=_(\"Last Name\"),\n name_placeholder=\"Garcia\",\n name_help_text=_(\n \"Please enter your last name the same way it is printed on your card, including capital letters and hyphens.\"\n ),\n sub_label=_(\"Reduced Fare Mobility ID card number\"),\n sub_help_text=_(\"This is a 4-digit number on the back of your card.\"),\n sub_placeholder=\"1234\",\n name_max_length=255,\n sub_input_mode=\"numeric\",\n sub_max_length=4,\n sub_pattern=r\"\\d{4}\",\n *args,\n **kwargs,\n )\n", "path": "benefits/eligibility/forms.py"}]}
| 2,658 | 720 |
gh_patches_debug_4135
|
rasdani/github-patches
|
git_diff
|
ipython__ipython-13905
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'Bad file descriptor' error logged in atexit callback
**Issue:** The following log lines are observed in stderr when daemonizing a process that imports `IPython.utils.io`.
> Exception ignored in atexit callback: <built-in method close of _io.TextIOWrapper object at 0x7f19e8aa9b10>
> OSError: [Errno 9] Bad file descriptor
**Reproducer:**
```python
import daemon
import sys
import IPython.utils.io
file_descriptors_to_preserve = []
with daemon.DaemonContext(
prevent_core=False,
signal_map={},
stderr=sys.stderr,
stdout=sys.stdout,
files_preserve=file_descriptors_to_preserve,
):
pass
```
**Root cause:**
This is due to the following lines in the **IPython.utils.io** module:
```python
# setup stdin/stdout/stderr to sys.stdin/sys.stdout/sys.stderr
devnull = open(os.devnull, "w", encoding="utf-8")
atexit.register(devnull.close)
```
After the child process is launched, all the files opened by the parent process are closed (except for stderr and stdout). While closing all the files, the devnull handle opened in the IPython.utils.io module is also closed. On process exit, the atexit module runs all the functions that were registered. Since devnull's close is registered, the atexit handler tries to close the already-closed file, which generates the 'Bad file descriptor' log in stderr.
</issue>
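The golden patch in this record resolves this by deleting the module-level `devnull` handle and its `atexit` registration entirely. A more defensive alternative (a sketch only, not what IPython merged) is to register a guard that tolerates the descriptor having been closed behind Python's back:
```python
import atexit
import os

devnull = open(os.devnull, "w", encoding="utf-8")

def _close_devnull():
    # python-daemon closes the underlying descriptor without the file object
    # knowing, so devnull.closed can still be False here; swallow the EBADF
    # instead of letting atexit print the traceback.
    if not devnull.closed:
        try:
            devnull.close()
        except OSError:
            pass

atexit.register(_close_devnull)
```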
<code>
[start of IPython/utils/io.py]
1 # encoding: utf-8
2 """
3 IO related utilities.
4 """
5
6 # Copyright (c) IPython Development Team.
7 # Distributed under the terms of the Modified BSD License.
8
9
10
11 import atexit
12 import os
13 import sys
14 import tempfile
15 from pathlib import Path
16 from warnings import warn
17
18 from IPython.utils.decorators import undoc
19 from .capture import CapturedIO, capture_output
20
21 # setup stdin/stdout/stderr to sys.stdin/sys.stdout/sys.stderr
22 devnull = open(os.devnull, "w", encoding="utf-8")
23 atexit.register(devnull.close)
24
25
26 class Tee(object):
27 """A class to duplicate an output stream to stdout/err.
28
29 This works in a manner very similar to the Unix 'tee' command.
30
31 When the object is closed or deleted, it closes the original file given to
32 it for duplication.
33 """
34 # Inspired by:
35 # http://mail.python.org/pipermail/python-list/2007-May/442737.html
36
37 def __init__(self, file_or_name, mode="w", channel='stdout'):
38 """Construct a new Tee object.
39
40 Parameters
41 ----------
42 file_or_name : filename or open filehandle (writable)
43 File that will be duplicated
44 mode : optional, valid mode for open().
45 If a filename was give, open with this mode.
46 channel : str, one of ['stdout', 'stderr']
47 """
48 if channel not in ['stdout', 'stderr']:
49 raise ValueError('Invalid channel spec %s' % channel)
50
51 if hasattr(file_or_name, 'write') and hasattr(file_or_name, 'seek'):
52 self.file = file_or_name
53 else:
54 encoding = None if "b" in mode else "utf-8"
55 self.file = open(file_or_name, mode, encoding=encoding)
56 self.channel = channel
57 self.ostream = getattr(sys, channel)
58 setattr(sys, channel, self)
59 self._closed = False
60
61 def close(self):
62 """Close the file and restore the channel."""
63 self.flush()
64 setattr(sys, self.channel, self.ostream)
65 self.file.close()
66 self._closed = True
67
68 def write(self, data):
69 """Write data to both channels."""
70 self.file.write(data)
71 self.ostream.write(data)
72 self.ostream.flush()
73
74 def flush(self):
75 """Flush both channels."""
76 self.file.flush()
77 self.ostream.flush()
78
79 def __del__(self):
80 if not self._closed:
81 self.close()
82
83
84 def ask_yes_no(prompt, default=None, interrupt=None):
85 """Asks a question and returns a boolean (y/n) answer.
86
87 If default is given (one of 'y','n'), it is used if the user input is
88 empty. If interrupt is given (one of 'y','n'), it is used if the user
89 presses Ctrl-C. Otherwise the question is repeated until an answer is
90 given.
91
92 An EOF is treated as the default answer. If there is no default, an
93 exception is raised to prevent infinite loops.
94
95 Valid answers are: y/yes/n/no (match is not case sensitive)."""
96
97 answers = {'y':True,'n':False,'yes':True,'no':False}
98 ans = None
99 while ans not in answers.keys():
100 try:
101 ans = input(prompt+' ').lower()
102 if not ans: # response was an empty string
103 ans = default
104 except KeyboardInterrupt:
105 if interrupt:
106 ans = interrupt
107 print("\r")
108 except EOFError:
109 if default in answers.keys():
110 ans = default
111 print()
112 else:
113 raise
114
115 return answers[ans]
116
117
118 def temp_pyfile(src, ext='.py'):
119 """Make a temporary python file, return filename and filehandle.
120
121 Parameters
122 ----------
123 src : string or list of strings (no need for ending newlines if list)
124 Source code to be written to the file.
125 ext : optional, string
126 Extension for the generated file.
127
128 Returns
129 -------
130 (filename, open filehandle)
131 It is the caller's responsibility to close the open file and unlink it.
132 """
133 fname = tempfile.mkstemp(ext)[1]
134 with open(Path(fname), "w", encoding="utf-8") as f:
135 f.write(src)
136 f.flush()
137 return fname
138
139
140 @undoc
141 def raw_print(*args, **kw):
142 """DEPRECATED: Raw print to sys.__stdout__, otherwise identical interface to print()."""
143 warn("IPython.utils.io.raw_print has been deprecated since IPython 7.0", DeprecationWarning, stacklevel=2)
144
145 print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\n'),
146 file=sys.__stdout__)
147 sys.__stdout__.flush()
148
149 @undoc
150 def raw_print_err(*args, **kw):
151 """DEPRECATED: Raw print to sys.__stderr__, otherwise identical interface to print()."""
152 warn("IPython.utils.io.raw_print_err has been deprecated since IPython 7.0", DeprecationWarning, stacklevel=2)
153
154 print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\n'),
155 file=sys.__stderr__)
156 sys.__stderr__.flush()
157
[end of IPython/utils/io.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/IPython/utils/io.py b/IPython/utils/io.py
--- a/IPython/utils/io.py
+++ b/IPython/utils/io.py
@@ -18,11 +18,6 @@
from IPython.utils.decorators import undoc
from .capture import CapturedIO, capture_output
-# setup stdin/stdout/stderr to sys.stdin/sys.stdout/sys.stderr
-devnull = open(os.devnull, "w", encoding="utf-8")
-atexit.register(devnull.close)
-
-
class Tee(object):
"""A class to duplicate an output stream to stdout/err.
|
{"golden_diff": "diff --git a/IPython/utils/io.py b/IPython/utils/io.py\n--- a/IPython/utils/io.py\n+++ b/IPython/utils/io.py\n@@ -18,11 +18,6 @@\n from IPython.utils.decorators import undoc\n from .capture import CapturedIO, capture_output\n \n-# setup stdin/stdout/stderr to sys.stdin/sys.stdout/sys.stderr\n-devnull = open(os.devnull, \"w\", encoding=\"utf-8\")\n-atexit.register(devnull.close)\n-\n-\n class Tee(object):\n \"\"\"A class to duplicate an output stream to stdout/err.\n", "issue": "'Bad file descriptor' error logged in atexit call back \n**Issue:** Following log lines are observed in stderr on daemonizing a process with `IPython.utils.io` as an import.\r\n> Exception ignored in atexit callback: <built-in method close of _io.TextIOWrapper object at 0x7f19e8aa9b10>\r\n> OSError: [Errno 9] Bad file descriptor\r\n\r\n\r\n**Reproducer:**\r\n```python\r\nimport daemon\r\nimport sys\r\nimport IPython.utils.io\r\n\r\nfile_descriptors_to_preserve = []\r\nwith daemon.DaemonContext(\r\n prevent_core=False,\r\n signal_map={},\r\n stderr=sys.stderr,\r\n stdout=sys.stdout,\r\n files_preserve=file_descriptors_to_preserve,\r\n):\r\n pass \r\n```\r\n\r\n**Root cause:**\r\nThis is due to the following lines in I**Python.utils.io module**\r\n```python\r\n# setup stdin/stdout/stderr to sys.stdin/sys.stdout/sys.stderr\r\ndevnull = open(os.devnull, \"w\", encoding=\"utf-8\")\r\natexit.register(devnull.close)\r\n```\r\nAfter the child process is launched, all the files opened by the parent process are closed(except for stderr, stdout). While closing all the files, devnull opened in the IPython.utils.io module is also closed. On process exit atexit module runs all the functions that were registered. Since devnull close is registered, atexit handler is trying to close the already closed file which is generating ' Bad file descriptor' log in stderr\n", "before_files": [{"content": "# encoding: utf-8\n\"\"\"\nIO related utilities.\n\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n\n\nimport atexit\nimport os\nimport sys\nimport tempfile\nfrom pathlib import Path\nfrom warnings import warn\n\nfrom IPython.utils.decorators import undoc\nfrom .capture import CapturedIO, capture_output\n\n# setup stdin/stdout/stderr to sys.stdin/sys.stdout/sys.stderr\ndevnull = open(os.devnull, \"w\", encoding=\"utf-8\")\natexit.register(devnull.close)\n\n\nclass Tee(object):\n \"\"\"A class to duplicate an output stream to stdout/err.\n\n This works in a manner very similar to the Unix 'tee' command.\n\n When the object is closed or deleted, it closes the original file given to\n it for duplication.\n \"\"\"\n # Inspired by:\n # http://mail.python.org/pipermail/python-list/2007-May/442737.html\n\n def __init__(self, file_or_name, mode=\"w\", channel='stdout'):\n \"\"\"Construct a new Tee object.\n\n Parameters\n ----------\n file_or_name : filename or open filehandle (writable)\n File that will be duplicated\n mode : optional, valid mode for open().\n If a filename was give, open with this mode.\n channel : str, one of ['stdout', 'stderr']\n \"\"\"\n if channel not in ['stdout', 'stderr']:\n raise ValueError('Invalid channel spec %s' % channel)\n\n if hasattr(file_or_name, 'write') and hasattr(file_or_name, 'seek'):\n self.file = file_or_name\n else:\n encoding = None if \"b\" in mode else \"utf-8\"\n self.file = open(file_or_name, mode, encoding=encoding)\n self.channel = channel\n self.ostream = getattr(sys, channel)\n setattr(sys, channel, self)\n 
self._closed = False\n\n def close(self):\n \"\"\"Close the file and restore the channel.\"\"\"\n self.flush()\n setattr(sys, self.channel, self.ostream)\n self.file.close()\n self._closed = True\n\n def write(self, data):\n \"\"\"Write data to both channels.\"\"\"\n self.file.write(data)\n self.ostream.write(data)\n self.ostream.flush()\n\n def flush(self):\n \"\"\"Flush both channels.\"\"\"\n self.file.flush()\n self.ostream.flush()\n\n def __del__(self):\n if not self._closed:\n self.close()\n\n\ndef ask_yes_no(prompt, default=None, interrupt=None):\n \"\"\"Asks a question and returns a boolean (y/n) answer.\n\n If default is given (one of 'y','n'), it is used if the user input is\n empty. If interrupt is given (one of 'y','n'), it is used if the user\n presses Ctrl-C. Otherwise the question is repeated until an answer is\n given.\n\n An EOF is treated as the default answer. If there is no default, an\n exception is raised to prevent infinite loops.\n\n Valid answers are: y/yes/n/no (match is not case sensitive).\"\"\"\n\n answers = {'y':True,'n':False,'yes':True,'no':False}\n ans = None\n while ans not in answers.keys():\n try:\n ans = input(prompt+' ').lower()\n if not ans: # response was an empty string\n ans = default\n except KeyboardInterrupt:\n if interrupt:\n ans = interrupt\n print(\"\\r\")\n except EOFError:\n if default in answers.keys():\n ans = default\n print()\n else:\n raise\n\n return answers[ans]\n\n\ndef temp_pyfile(src, ext='.py'):\n \"\"\"Make a temporary python file, return filename and filehandle.\n\n Parameters\n ----------\n src : string or list of strings (no need for ending newlines if list)\n Source code to be written to the file.\n ext : optional, string\n Extension for the generated file.\n\n Returns\n -------\n (filename, open filehandle)\n It is the caller's responsibility to close the open file and unlink it.\n \"\"\"\n fname = tempfile.mkstemp(ext)[1]\n with open(Path(fname), \"w\", encoding=\"utf-8\") as f:\n f.write(src)\n f.flush()\n return fname\n\n\n@undoc\ndef raw_print(*args, **kw):\n \"\"\"DEPRECATED: Raw print to sys.__stdout__, otherwise identical interface to print().\"\"\"\n warn(\"IPython.utils.io.raw_print has been deprecated since IPython 7.0\", DeprecationWarning, stacklevel=2)\n\n print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\\n'),\n file=sys.__stdout__)\n sys.__stdout__.flush()\n\n@undoc\ndef raw_print_err(*args, **kw):\n \"\"\"DEPRECATED: Raw print to sys.__stderr__, otherwise identical interface to print().\"\"\"\n warn(\"IPython.utils.io.raw_print_err has been deprecated since IPython 7.0\", DeprecationWarning, stacklevel=2)\n\n print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\\n'),\n file=sys.__stderr__)\n sys.__stderr__.flush()\n", "path": "IPython/utils/io.py"}]}
| 2,366 | 125 |
gh_patches_debug_28085
|
rasdani/github-patches
|
git_diff
|
dynaconf__dynaconf-550
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The venv folder is not being excluded from pre-commit hooks
Hello! First of all, I would like to thank you for the library.
I followed the [CONTRIBUTING.md guide](https://github.com/rochacbruno/dynaconf/blob/master/CONTRIBUTING.md) and ran the `make all` command to check that everything was right before looking at the code. When I did that, the **reorder-python-imports** hook failed because of reorders it made to some files in the `venv` folder:
```bash
...
Reordering imports in venv/lib/python3.7/site-packages/pynvim/msgpack_rpc/msgpack_stream.py
Reordering imports in venv/lib/python3.7/site-packages/tqdm/contrib/itertools.py
Reordering imports in venv/lib/python3.7/site-packages/coverage/execfile.py
Reordering imports in venv/lib/python3.7/site-packages/pyflakes/test/test_undefined_names.py
Reordering imports in venv/lib/python3.7/site-packages/nltk/test/unit/test_tgrep.py
Reordering imports in venv/lib/python3.7/site-packages/future/moves/test/__init__.py
Reordering imports in venv/lib/python3.7/site-packages/pynvim/api/common.py
Reordering imports in venv/lib/python3.7/site-packages/nltk/parse/recursivedescent.py
Reordering imports in venv/lib/python3.7/site-packages/django/contrib/admin/templatetags/base.py
Reordering imports in venv/lib/python3.7/site-packages/jeepney/tests/test_bus.py
Reordering imports in venv/lib/python3.7/site-packages/pip/_vendor/cachecontrol/serialize.py
Reordering imports in venv/lib/python3.7/site-packages/sphinx/transforms/post_transforms/__init__.py
Reordering imports in venv/bin/rst2latex.py
Reordering imports in venv/lib/python3.7/site-packages/cryptography/hazmat/primitives/ciphers/modes.py
Reordering imports in venv/lib/python3.7/site-packages/setuptools/_distutils/ccompiler.py
Reordering imports in venv/lib/python3.7/site-packages/botocore/vendored/requests/packages/__init__.py
...
```
Besides, the **black** hook was taking too much time to finish, and I think this happened because it was reformatting the files in the `venv` folder. Is that expected behavior? Am I doing something wrong or missing some configuration?
</issue>
<code>
[start of dynaconf/loaders/base.py]
1 import io
2 import warnings
3
4 from dynaconf.utils import build_env_list
5 from dynaconf.utils import ensure_a_list
6 from dynaconf.utils import upperfy
7
8
9 class BaseLoader:
10 """Base loader for dynaconf source files.
11
12 :param obj: {[LazySettings]} -- [Dynaconf settings]
13 :param env: {[string]} -- [the current env to be loaded defaults to
14 [development]]
15 :param identifier: {[string]} -- [identifier ini, yaml, json, py, toml]
16 :param extensions: {[list]} -- [List of extensions with dots ['.a', '.b']]
17 :param file_reader: {[callable]} -- [reads file return dict]
18 :param string_reader: {[callable]} -- [reads string return dict]
19 """
20
21 def __init__(
22 self, obj, env, identifier, extensions, file_reader, string_reader
23 ):
24 """Instantiates a loader for different sources"""
25 self.obj = obj
26 self.env = env or obj.current_env
27 self.identifier = identifier
28 self.extensions = extensions
29 self.file_reader = file_reader
30 self.string_reader = string_reader
31
32 @staticmethod
33 def warn_not_installed(obj, identifier): # pragma: no cover
34 if identifier not in obj._not_installed_warnings:
35 warnings.warn(
36 f"{identifier} support is not installed in your environment. "
37 f"`pip install dynaconf[{identifier}]`"
38 )
39 obj._not_installed_warnings.append(identifier)
40
41 def load(self, filename=None, key=None, silent=True):
42 """
43 Reads and loads in to `self.obj` a single key or all keys from source
44
45 :param filename: Optional filename to load
46 :param key: if provided load a single key
47 :param silent: if load erros should be silenced
48 """
49 filename = filename or self.obj.get(self.identifier.upper())
50 if not filename:
51 return
52
53 if not isinstance(filename, (list, tuple)):
54 split_files = ensure_a_list(filename)
55 if all([f.endswith(self.extensions) for f in split_files]): # noqa
56 files = split_files # it is a ['file.ext', ...]
57 else: # it is a single config as string
58 files = [filename]
59 else: # it is already a list/tuple
60 files = filename
61
62 source_data = self.get_source_data(files)
63
64 if self.obj.get("ENVIRONMENTS_FOR_DYNACONF") is False:
65 self._envless_load(source_data, silent, key)
66 else:
67 self._load_all_envs(source_data, silent, key)
68
69 def get_source_data(self, files):
70 """Reads each file and returns source data for each file
71 {"path/to/file.ext": {"key": "value"}}
72 """
73 data = {}
74 for source_file in files:
75 if source_file.endswith(self.extensions):
76 try:
77 with io.open(
78 source_file,
79 encoding=self.obj.get(
80 "ENCODING_FOR_DYNACONF", "utf-8"
81 ),
82 ) as open_file:
83 content = self.file_reader(open_file)
84 self.obj._loaded_files.append(source_file)
85 if content:
86 data[source_file] = content
87 except IOError as e:
88 if ".local." not in source_file:
89 warnings.warn(
90 f"{self.identifier}_loader: {source_file} "
91 f":{str(e)}"
92 )
93 else:
94 # for tests it is possible to pass string
95 content = self.string_reader(source_file)
96 if content:
97 data[source_file] = content
98 return data
99
100 def _envless_load(self, source_data, silent=True, key=None):
101 """Load all the keys from each file without env separation"""
102 for file_data in source_data.values():
103 self._set_data_to_obj(file_data, self.identifier, key=key)
104
105 def _load_all_envs(self, source_data, silent=True, key=None):
106 """Load configs from files separating by each environment"""
107
108 for file_data in source_data.values():
109
110 # env name is checked in lower
111 file_data = {k.lower(): value for k, value in file_data.items()}
112
113 # is there a `dynaconf_merge` on top level of file?
114 file_merge = file_data.get("dynaconf_merge")
115
116 for env in build_env_list(self.obj, self.env):
117 env = env.lower() # lower for better comparison
118 data = {}
119
120 try:
121 data = file_data[env] or {}
122 except KeyError:
123 if silent:
124 continue
125 raise
126
127 if not data:
128 continue
129
130 if env != self.obj.get("DEFAULT_ENV_FOR_DYNACONF").lower():
131 identifier = f"{self.identifier}_{env}"
132 else:
133 identifier = self.identifier
134
135 self._set_data_to_obj(data, identifier, file_merge, key)
136
137 def _set_data_to_obj(
138 self, data, identifier, file_merge=None, key=False,
139 ):
140 """Calls setttings.set to add the keys"""
141
142 # data 1st level keys should be transformed to upper case.
143 data = {upperfy(k): v for k, v in data.items()}
144 if key:
145 key = upperfy(key)
146
147 # is there a `dynaconf_merge` inside an `[env]`?
148 file_merge = file_merge or data.pop("DYNACONF_MERGE", False)
149
150 if not key:
151 self.obj.update(
152 data, loader_identifier=identifier, merge=file_merge,
153 )
154 elif key in data:
155 self.obj.set(
156 key,
157 data.get(key),
158 loader_identifier=identifier,
159 merge=file_merge,
160 )
161
[end of dynaconf/loaders/base.py]
[start of example/issues/379_dict_like_iteration/app.py]
1 from dynaconf import Dynaconf
2
3 settings = Dynaconf(settings_files=["settings.toml"],)
4
5 assert settings.key == "value"
6 assert settings.number == 789
7 assert settings.a_dict.nested.other_level == "nested value"
8 assert settings["a_boolean"] is False
9 assert settings.get("DONTEXIST", default=1) == 1
10
11
12 for item in settings:
13 print(item)
14
15
16 for key, value in settings.items(): # dict like iteration
17 print(key, value)
18
[end of example/issues/379_dict_like_iteration/app.py]
[start of example/issues/519_underscore_in_name/app.py]
1 import os
2
3 from dynaconf import Dynaconf
4
5 config = Dynaconf(settings_files=["settings.yml"], envvar_prefix="ATC",)
6
7 # envvar set is case insensitive
8 # ATC_BLE__DEVICE_ID=x and ATC_BLE__device_id=x are the same
9 expected = os.environ.get("EXPECTED_VALUE", 0)
10
11 # access is case insensitive
12 assert config.ble.device_id == int(expected)
13 assert config.BLE.DEVICE_ID == int(expected)
14
[end of example/issues/519_underscore_in_name/app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dynaconf/loaders/base.py b/dynaconf/loaders/base.py
--- a/dynaconf/loaders/base.py
+++ b/dynaconf/loaders/base.py
@@ -135,7 +135,11 @@
self._set_data_to_obj(data, identifier, file_merge, key)
def _set_data_to_obj(
- self, data, identifier, file_merge=None, key=False,
+ self,
+ data,
+ identifier,
+ file_merge=None,
+ key=False,
):
"""Calls setttings.set to add the keys"""
@@ -149,7 +153,9 @@
if not key:
self.obj.update(
- data, loader_identifier=identifier, merge=file_merge,
+ data,
+ loader_identifier=identifier,
+ merge=file_merge,
)
elif key in data:
self.obj.set(
diff --git a/example/issues/379_dict_like_iteration/app.py b/example/issues/379_dict_like_iteration/app.py
--- a/example/issues/379_dict_like_iteration/app.py
+++ b/example/issues/379_dict_like_iteration/app.py
@@ -1,6 +1,8 @@
from dynaconf import Dynaconf
-settings = Dynaconf(settings_files=["settings.toml"],)
+settings = Dynaconf(
+ settings_files=["settings.toml"],
+)
assert settings.key == "value"
assert settings.number == 789
diff --git a/example/issues/519_underscore_in_name/app.py b/example/issues/519_underscore_in_name/app.py
--- a/example/issues/519_underscore_in_name/app.py
+++ b/example/issues/519_underscore_in_name/app.py
@@ -2,7 +2,10 @@
from dynaconf import Dynaconf
-config = Dynaconf(settings_files=["settings.yml"], envvar_prefix="ATC",)
+config = Dynaconf(
+ settings_files=["settings.yml"],
+ envvar_prefix="ATC",
+)
# envvar set is case insensitive
# ATC_BLE__DEVICE_ID=x and ATC_BLE__device_id=x are the same
|
{"golden_diff": "diff --git a/dynaconf/loaders/base.py b/dynaconf/loaders/base.py\n--- a/dynaconf/loaders/base.py\n+++ b/dynaconf/loaders/base.py\n@@ -135,7 +135,11 @@\n self._set_data_to_obj(data, identifier, file_merge, key)\n \n def _set_data_to_obj(\n- self, data, identifier, file_merge=None, key=False,\n+ self,\n+ data,\n+ identifier,\n+ file_merge=None,\n+ key=False,\n ):\n \"\"\"Calls setttings.set to add the keys\"\"\"\n \n@@ -149,7 +153,9 @@\n \n if not key:\n self.obj.update(\n- data, loader_identifier=identifier, merge=file_merge,\n+ data,\n+ loader_identifier=identifier,\n+ merge=file_merge,\n )\n elif key in data:\n self.obj.set(\ndiff --git a/example/issues/379_dict_like_iteration/app.py b/example/issues/379_dict_like_iteration/app.py\n--- a/example/issues/379_dict_like_iteration/app.py\n+++ b/example/issues/379_dict_like_iteration/app.py\n@@ -1,6 +1,8 @@\n from dynaconf import Dynaconf\n \n-settings = Dynaconf(settings_files=[\"settings.toml\"],)\n+settings = Dynaconf(\n+ settings_files=[\"settings.toml\"],\n+)\n \n assert settings.key == \"value\"\n assert settings.number == 789\ndiff --git a/example/issues/519_underscore_in_name/app.py b/example/issues/519_underscore_in_name/app.py\n--- a/example/issues/519_underscore_in_name/app.py\n+++ b/example/issues/519_underscore_in_name/app.py\n@@ -2,7 +2,10 @@\n \n from dynaconf import Dynaconf\n \n-config = Dynaconf(settings_files=[\"settings.yml\"], envvar_prefix=\"ATC\",)\n+config = Dynaconf(\n+ settings_files=[\"settings.yml\"],\n+ envvar_prefix=\"ATC\",\n+)\n \n # envvar set is case insensitive\n # ATC_BLE__DEVICE_ID=x and ATC_BLE__device_id=x are the same\n", "issue": "The venv folder is not being excluded from pre-commit hooks\nHello! At first, I would like to thank you for the library.\r\n\r\nI follow the [CONTRIBUTING.md guide](https://github.com/rochacbruno/dynaconf/blob/master/CONTRIBUTING.md) and run the `make all` command to check if everything was right before I see the code. 
When I do that the **reorder-python-imports** hook failed because of some reorders that he made in some files in the `venv` folder:\r\n\r\n```bash\r\n...\r\nReordering imports in venv/lib/python3.7/site-packages/pynvim/msgpack_rpc/msgpack_stream.py\r\nReordering imports in venv/lib/python3.7/site-packages/tqdm/contrib/itertools.py\r\nReordering imports in venv/lib/python3.7/site-packages/coverage/execfile.py\r\nReordering imports in venv/lib/python3.7/site-packages/pyflakes/test/test_undefined_names.py\r\nReordering imports in venv/lib/python3.7/site-packages/nltk/test/unit/test_tgrep.py\r\nReordering imports in venv/lib/python3.7/site-packages/future/moves/test/__init__.py\r\nReordering imports in venv/lib/python3.7/site-packages/pynvim/api/common.py\r\nReordering imports in venv/lib/python3.7/site-packages/nltk/parse/recursivedescent.py\r\nReordering imports in venv/lib/python3.7/site-packages/django/contrib/admin/templatetags/base.py\r\nReordering imports in venv/lib/python3.7/site-packages/jeepney/tests/test_bus.py\r\nReordering imports in venv/lib/python3.7/site-packages/pip/_vendor/cachecontrol/serialize.py\r\nReordering imports in venv/lib/python3.7/site-packages/sphinx/transforms/post_transforms/__init__.py\r\nReordering imports in venv/bin/rst2latex.py\r\nReordering imports in venv/lib/python3.7/site-packages/cryptography/hazmat/primitives/ciphers/modes.py\r\nReordering imports in venv/lib/python3.7/site-packages/setuptools/_distutils/ccompiler.py\r\nReordering imports in venv/lib/python3.7/site-packages/botocore/vendored/requests/packages/__init__.py\r\n...\r\n```\r\n\r\nBesides, the **black** hook was taking too much time to finish and I think that this happened because he was reformating the files in the `venv` folder. Is that an expected behavior? Am I doing something wrong or missing some configuration?\n", "before_files": [{"content": "import io\nimport warnings\n\nfrom dynaconf.utils import build_env_list\nfrom dynaconf.utils import ensure_a_list\nfrom dynaconf.utils import upperfy\n\n\nclass BaseLoader:\n \"\"\"Base loader for dynaconf source files.\n\n :param obj: {[LazySettings]} -- [Dynaconf settings]\n :param env: {[string]} -- [the current env to be loaded defaults to\n [development]]\n :param identifier: {[string]} -- [identifier ini, yaml, json, py, toml]\n :param extensions: {[list]} -- [List of extensions with dots ['.a', '.b']]\n :param file_reader: {[callable]} -- [reads file return dict]\n :param string_reader: {[callable]} -- [reads string return dict]\n \"\"\"\n\n def __init__(\n self, obj, env, identifier, extensions, file_reader, string_reader\n ):\n \"\"\"Instantiates a loader for different sources\"\"\"\n self.obj = obj\n self.env = env or obj.current_env\n self.identifier = identifier\n self.extensions = extensions\n self.file_reader = file_reader\n self.string_reader = string_reader\n\n @staticmethod\n def warn_not_installed(obj, identifier): # pragma: no cover\n if identifier not in obj._not_installed_warnings:\n warnings.warn(\n f\"{identifier} support is not installed in your environment. 
\"\n f\"`pip install dynaconf[{identifier}]`\"\n )\n obj._not_installed_warnings.append(identifier)\n\n def load(self, filename=None, key=None, silent=True):\n \"\"\"\n Reads and loads in to `self.obj` a single key or all keys from source\n\n :param filename: Optional filename to load\n :param key: if provided load a single key\n :param silent: if load erros should be silenced\n \"\"\"\n filename = filename or self.obj.get(self.identifier.upper())\n if not filename:\n return\n\n if not isinstance(filename, (list, tuple)):\n split_files = ensure_a_list(filename)\n if all([f.endswith(self.extensions) for f in split_files]): # noqa\n files = split_files # it is a ['file.ext', ...]\n else: # it is a single config as string\n files = [filename]\n else: # it is already a list/tuple\n files = filename\n\n source_data = self.get_source_data(files)\n\n if self.obj.get(\"ENVIRONMENTS_FOR_DYNACONF\") is False:\n self._envless_load(source_data, silent, key)\n else:\n self._load_all_envs(source_data, silent, key)\n\n def get_source_data(self, files):\n \"\"\"Reads each file and returns source data for each file\n {\"path/to/file.ext\": {\"key\": \"value\"}}\n \"\"\"\n data = {}\n for source_file in files:\n if source_file.endswith(self.extensions):\n try:\n with io.open(\n source_file,\n encoding=self.obj.get(\n \"ENCODING_FOR_DYNACONF\", \"utf-8\"\n ),\n ) as open_file:\n content = self.file_reader(open_file)\n self.obj._loaded_files.append(source_file)\n if content:\n data[source_file] = content\n except IOError as e:\n if \".local.\" not in source_file:\n warnings.warn(\n f\"{self.identifier}_loader: {source_file} \"\n f\":{str(e)}\"\n )\n else:\n # for tests it is possible to pass string\n content = self.string_reader(source_file)\n if content:\n data[source_file] = content\n return data\n\n def _envless_load(self, source_data, silent=True, key=None):\n \"\"\"Load all the keys from each file without env separation\"\"\"\n for file_data in source_data.values():\n self._set_data_to_obj(file_data, self.identifier, key=key)\n\n def _load_all_envs(self, source_data, silent=True, key=None):\n \"\"\"Load configs from files separating by each environment\"\"\"\n\n for file_data in source_data.values():\n\n # env name is checked in lower\n file_data = {k.lower(): value for k, value in file_data.items()}\n\n # is there a `dynaconf_merge` on top level of file?\n file_merge = file_data.get(\"dynaconf_merge\")\n\n for env in build_env_list(self.obj, self.env):\n env = env.lower() # lower for better comparison\n data = {}\n\n try:\n data = file_data[env] or {}\n except KeyError:\n if silent:\n continue\n raise\n\n if not data:\n continue\n\n if env != self.obj.get(\"DEFAULT_ENV_FOR_DYNACONF\").lower():\n identifier = f\"{self.identifier}_{env}\"\n else:\n identifier = self.identifier\n\n self._set_data_to_obj(data, identifier, file_merge, key)\n\n def _set_data_to_obj(\n self, data, identifier, file_merge=None, key=False,\n ):\n \"\"\"Calls setttings.set to add the keys\"\"\"\n\n # data 1st level keys should be transformed to upper case.\n data = {upperfy(k): v for k, v in data.items()}\n if key:\n key = upperfy(key)\n\n # is there a `dynaconf_merge` inside an `[env]`?\n file_merge = file_merge or data.pop(\"DYNACONF_MERGE\", False)\n\n if not key:\n self.obj.update(\n data, loader_identifier=identifier, merge=file_merge,\n )\n elif key in data:\n self.obj.set(\n key,\n data.get(key),\n loader_identifier=identifier,\n merge=file_merge,\n )\n", "path": "dynaconf/loaders/base.py"}, {"content": "from dynaconf 
import Dynaconf\n\nsettings = Dynaconf(settings_files=[\"settings.toml\"],)\n\nassert settings.key == \"value\"\nassert settings.number == 789\nassert settings.a_dict.nested.other_level == \"nested value\"\nassert settings[\"a_boolean\"] is False\nassert settings.get(\"DONTEXIST\", default=1) == 1\n\n\nfor item in settings:\n print(item)\n\n\nfor key, value in settings.items(): # dict like iteration\n print(key, value)\n", "path": "example/issues/379_dict_like_iteration/app.py"}, {"content": "import os\n\nfrom dynaconf import Dynaconf\n\nconfig = Dynaconf(settings_files=[\"settings.yml\"], envvar_prefix=\"ATC\",)\n\n# envvar set is case insensitive\n# ATC_BLE__DEVICE_ID=x and ATC_BLE__device_id=x are the same\nexpected = os.environ.get(\"EXPECTED_VALUE\", 0)\n\n# access is case insensitive\nassert config.ble.device_id == int(expected)\nassert config.BLE.DEVICE_ID == int(expected)\n", "path": "example/issues/519_underscore_in_name/app.py"}]}
| 3,007 | 478 |
gh_patches_debug_25920
|
rasdani/github-patches
|
git_diff
|
microsoft__botbuilder-python-924
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[PORT] Fix errors when From is null in telemetry
> Port this change from botbuilder-dotnet/master branch:
https://github.com/microsoft/botbuilder-dotnet/pull/3436
Fixes: #3395
If `TelemetryLoggerMiddleware` is used and then `CreateConversationAsync()` is called, there are cases where `Activity.From` will be null, which causes things like this to throw:
```csharp
{ TelemetryConstants.FromIdProperty, activity.From.Id },
```
**Changes**:
Adds a few null-conditional operators, where appropriate.
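For the Python port, the same guard becomes a conditional expression. A minimal sketch using names already present in `TelemetryLoggerMiddleware` (the accompanying patch applies this pattern inside `fill_receive_event_properties`):

```python
# Read the sender defensively: CreateConversationAsync() can produce
# activities whose from_property is None.
from_id = activity.from_property.id if activity.from_property else None
properties[TelemetryConstants.FROM_ID_PROPERTY] = from_id
```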
**Testing**:
Added one test which calls `CreateConversationAsync()` while ensuring the `Activity.From` is null.
# Changed projects
* Microsoft.Bot.Builder
* integration
* Microsoft.Bot.Builder.Tests
</issue>
<code>
[start of libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3 """Middleware Component for logging Activity messages."""
4
5 from typing import Awaitable, Callable, List, Dict
6 from botbuilder.schema import Activity, ConversationReference, ActivityTypes
7 from .bot_telemetry_client import BotTelemetryClient
8 from .bot_assert import BotAssert
9 from .middleware_set import Middleware
10 from .null_telemetry_client import NullTelemetryClient
11 from .turn_context import TurnContext
12 from .telemetry_constants import TelemetryConstants
13 from .telemetry_logger_constants import TelemetryLoggerConstants
14
15
16 # pylint: disable=line-too-long
17 class TelemetryLoggerMiddleware(Middleware):
18 """Middleware for logging incoming, outgoing, updated or deleted Activity messages."""
19
20 def __init__(
21 self, telemetry_client: BotTelemetryClient, log_personal_information: bool
22 ) -> None:
23 super(TelemetryLoggerMiddleware, self).__init__()
24 self._telemetry_client = telemetry_client or NullTelemetryClient()
25 self._log_personal_information = log_personal_information
26
27 @property
28 def telemetry_client(self) -> BotTelemetryClient:
29 """Gets the currently configured BotTelemetryClient."""
30 return self._telemetry_client
31
32 @property
33 def log_personal_information(self) -> bool:
34 """ Gets a value indicating whether determines whether to log personal
35 information that came from the user."""
36 return self._log_personal_information
37
38 # pylint: disable=arguments-differ
39 async def on_turn(
40 self, context: TurnContext, logic_fn: Callable[[TurnContext], Awaitable]
41 ) -> None:
42 """Logs events based on incoming and outgoing activities using
43 BotTelemetryClient base class
44
45 :param turn_context: The context object for this turn.
46 :param logic: Callable to continue the bot middleware pipeline
47
48 :return: None
49 """
50 BotAssert.context_not_none(context)
51
52 # Log incoming activity at beginning of turn
53 if context.activity:
54 activity = context.activity
55 # Log Bot Message Received
56 await self.on_receive_activity(activity)
57
58 # hook up onSend pipeline
59 # pylint: disable=unused-argument
60 async def send_activities_handler(
61 ctx: TurnContext,
62 activities: List[Activity],
63 next_send: Callable[[], Awaitable[None]],
64 ):
65 # Run full pipeline
66 responses = await next_send()
67 for activity in activities:
68 await self.on_send_activity(activity)
69 return responses
70
71 context.on_send_activities(send_activities_handler)
72
73 # hook up update activity pipeline
74 async def update_activity_handler(
75 ctx: TurnContext, activity: Activity, next_update: Callable[[], Awaitable]
76 ):
77 # Run full pipeline
78 response = await next_update()
79 await self.on_update_activity(activity)
80 return response
81
82 context.on_update_activity(update_activity_handler)
83
84 # hook up delete activity pipeline
85 async def delete_activity_handler(
86 ctx: TurnContext,
87 reference: ConversationReference,
88 next_delete: Callable[[], Awaitable],
89 ):
90 # Run full pipeline
91 await next_delete()
92
93 delete_msg = Activity(
94 type=ActivityTypes.message_delete, id=reference.activity_id
95 )
96 deleted_activity: Activity = TurnContext.apply_conversation_reference(
97 delete_msg, reference, False
98 )
99 await self.on_delete_activity(deleted_activity)
100
101 context.on_delete_activity(delete_activity_handler)
102
103 if logic_fn:
104 await logic_fn()
105
106 async def on_receive_activity(self, activity: Activity) -> None:
107 """Invoked when a message is received from the user.
108 Performs logging of telemetry data using the BotTelemetryClient.track_event() method.
109 This event name used is "BotMessageReceived".
110 :param activity: Current activity sent from user.
111 """
112 self.telemetry_client.track_event(
113 TelemetryLoggerConstants.BOT_MSG_RECEIVE_EVENT,
114 await self.fill_receive_event_properties(activity),
115 )
116
117 async def on_send_activity(self, activity: Activity) -> None:
118 """Invoked when the bot sends a message to the user.
119 Performs logging of telemetry data using the BotTelemetryClient.track_event() method.
120 This event name used is "BotMessageSend".
121 :param activity: Current activity sent from bot.
122 """
123 self.telemetry_client.track_event(
124 TelemetryLoggerConstants.BOT_MSG_SEND_EVENT,
125 await self.fill_send_event_properties(activity),
126 )
127
128 async def on_update_activity(self, activity: Activity) -> None:
129 """Invoked when the bot updates a message.
130 Performs logging of telemetry data using the BotTelemetryClient.track_event() method.
131 This event name used is "BotMessageUpdate".
132 :param activity: Current activity sent from user.
133 """
134 self.telemetry_client.track_event(
135 TelemetryLoggerConstants.BOT_MSG_UPDATE_EVENT,
136 await self.fill_update_event_properties(activity),
137 )
138
139 async def on_delete_activity(self, activity: Activity) -> None:
140 """Invoked when the bot deletes a message.
141 Performs logging of telemetry data using the BotTelemetryClient.track_event() method.
142 This event name used is "BotMessageDelete".
143 :param activity: Current activity sent from user.
144 """
145 self.telemetry_client.track_event(
146 TelemetryLoggerConstants.BOT_MSG_DELETE_EVENT,
147 await self.fill_delete_event_properties(activity),
148 )
149
150 async def fill_receive_event_properties(
151 self, activity: Activity, additional_properties: Dict[str, str] = None
152 ) -> Dict[str, str]:
153 """Fills the event properties for the BotMessageReceived.
154 Adheres to the LogPersonalInformation flag to filter Name, Text and Speak properties.
155 :param activity: activity sent from user.
156 :param additional_properties: Additional properties to add to the event.
157 Additional properties can override "stock" properties.
158
159 :return: A dictionary that is sent as "Properties" to
160 BotTelemetryClient.track_event method for the BotMessageReceived event.
161 """
162 properties = {
163 TelemetryConstants.FROM_ID_PROPERTY: activity.from_property.id,
164 TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,
165 TelemetryConstants.LOCALE_PROPERTY: activity.locale,
166 TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,
167 TelemetryConstants.RECIPIENT_NAME_PROPERTY: activity.from_property.name,
168 }
169
170 if self.log_personal_information:
171 if activity.from_property.name and activity.from_property.name.strip():
172 properties[
173 TelemetryConstants.FROM_NAME_PROPERTY
174 ] = activity.from_property.name
175 if activity.text and activity.text.strip():
176 properties[TelemetryConstants.TEXT_PROPERTY] = activity.text
177 if activity.speak and activity.speak.strip():
178 properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak
179
180 # Additional properties can override "stock" properties
181 if additional_properties:
182 for prop in additional_properties:
183 properties[prop.key] = prop.value
184
185 return properties
186
187 async def fill_send_event_properties(
188 self, activity: Activity, additional_properties: Dict[str, str] = None
189 ) -> Dict[str, str]:
190 """Fills the event properties for the BotMessageSend.
191 These properties are logged when an activity message is sent by the Bot to the user.
192 :param activity: activity sent from user.
193 :param additional_properties: Additional properties to add to the event.
194 Additional properties can override "stock" properties.
195
196 :return: A dictionary that is sent as "Properties" to the
197 BotTelemetryClient.track_event method for the BotMessageSend event.
198 """
199 properties = {
200 TelemetryConstants.REPLY_ACTIVITY_ID_PROPERTY: activity.reply_to_id,
201 TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,
202 TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,
203 TelemetryConstants.LOCALE_PROPERTY: activity.locale,
204 }
205
206 # Use the LogPersonalInformation flag to toggle logging PII data, text and user name are common examples
207 if self.log_personal_information:
208 if activity.from_property.name and activity.from_property.name.strip():
209 properties[
210 TelemetryConstants.FROM_NAME_PROPERTY
211 ] = activity.from_property.name
212 if activity.text and activity.text.strip():
213 properties[TelemetryConstants.TEXT_PROPERTY] = activity.text
214 if activity.speak and activity.speak.strip():
215 properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak
216
217 # Additional properties can override "stock" properties
218 if additional_properties:
219 for prop in additional_properties:
220 properties[prop.key] = prop.value
221
222 return properties
223
224 async def fill_update_event_properties(
225 self, activity: Activity, additional_properties: Dict[str, str] = None
226 ) -> Dict[str, str]:
227 """Fills the event properties for the BotMessageUpdate.
228 These properties are logged when an activity message is updated by the Bot.
229 For example, if a card is interacted with by the use, and the card needs
230 to be updated to reflect some interaction.
231 :param activity: activity sent from user.
232 :param additional_properties: Additional properties to add to the event.
233 Additional properties can override "stock" properties.
234
235 :return: A dictionary that is sent as "Properties" to the
236 BotTelemetryClient.track_event method for the BotMessageUpdate event.
237 """
238 properties = {
239 TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,
240 TelemetryConstants.CONVERSATION_ID_PROPERTY: activity.conversation.id,
241 TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,
242 TelemetryConstants.LOCALE_PROPERTY: activity.locale,
243 }
244
245 # Use the LogPersonalInformation flag to toggle logging PII data, text is a common examples
246 if self.log_personal_information:
247 if activity.text and activity.text.strip():
248 properties[TelemetryConstants.TEXT_PROPERTY] = activity.text
249
250 # Additional properties can override "stock" properties
251 if additional_properties:
252 for prop in additional_properties:
253 properties[prop.key] = prop.value
254
255 return properties
256
257 async def fill_delete_event_properties(
258 self, activity: Activity, additional_properties: Dict[str, str] = None
259 ) -> Dict[str, str]:
260 """Fills the event properties for the BotMessageDelete.
261 These properties are logged when an activity message is deleted by the Bot.
262 :param activity: activity sent from user.
263 :param additional_properties: Additional properties to add to the event.
264 Additional properties can override "stock" properties.
265
266 :return: A dictionary that is sent as "Properties" to the
267 BotTelemetryClient.track_event method for the BotMessageUpdate event.
268 """
269 properties = {
270 TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,
271 TelemetryConstants.CONVERSATION_ID_PROPERTY: activity.conversation.id,
272 TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,
273 }
274
275 # Additional properties can override "stock" properties
276 if additional_properties:
277 for prop in additional_properties:
278 properties[prop.key] = prop.value
279
280 return properties
281
[end of libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py b/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py
--- a/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py
+++ b/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py
@@ -160,15 +160,21 @@
BotTelemetryClient.track_event method for the BotMessageReceived event.
"""
properties = {
- TelemetryConstants.FROM_ID_PROPERTY: activity.from_property.id,
+ TelemetryConstants.FROM_ID_PROPERTY: activity.from_property.id
+ if activity.from_property
+ else None,
TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,
TelemetryConstants.LOCALE_PROPERTY: activity.locale,
TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,
- TelemetryConstants.RECIPIENT_NAME_PROPERTY: activity.from_property.name,
+ TelemetryConstants.RECIPIENT_NAME_PROPERTY: activity.recipient.name,
}
if self.log_personal_information:
- if activity.from_property.name and activity.from_property.name.strip():
+ if (
+ activity.from_property
+ and activity.from_property.name
+ and activity.from_property.name.strip()
+ ):
properties[
TelemetryConstants.FROM_NAME_PROPERTY
] = activity.from_property.name
|
{"golden_diff": "diff --git a/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py b/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py\n--- a/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py\n+++ b/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py\n@@ -160,15 +160,21 @@\n BotTelemetryClient.track_event method for the BotMessageReceived event.\n \"\"\"\n properties = {\n- TelemetryConstants.FROM_ID_PROPERTY: activity.from_property.id,\n+ TelemetryConstants.FROM_ID_PROPERTY: activity.from_property.id\n+ if activity.from_property\n+ else None,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n TelemetryConstants.LOCALE_PROPERTY: activity.locale,\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n- TelemetryConstants.RECIPIENT_NAME_PROPERTY: activity.from_property.name,\n+ TelemetryConstants.RECIPIENT_NAME_PROPERTY: activity.recipient.name,\n }\n \n if self.log_personal_information:\n- if activity.from_property.name and activity.from_property.name.strip():\n+ if (\n+ activity.from_property\n+ and activity.from_property.name\n+ and activity.from_property.name.strip()\n+ ):\n properties[\n TelemetryConstants.FROM_NAME_PROPERTY\n ] = activity.from_property.name\n", "issue": "[PORT] Fix errors when From is null in telemetry\n> Port this change from botbuilder-dotnet/master branch:\nhttps://github.com/microsoft/botbuilder-dotnet/pull/3436\n\nFixes: #3395 \r\n\r\nIf `TelemetryLoggerMiddleware` is used and then `CreateConversationAsync()` is called, there are cases where `Activity.From` will be null, which causes things like this to throw:\r\n\r\n```csharp\r\n{ TelemetryConstants.FromIdProperty, activity.From.Id },\r\n```\r\n\r\n**Changes**:\r\n\r\nAdds a few null-conditional operators, where appropriate.\r\n\r\n**Testing**:\r\n\r\nAdded one test which calls `CreateConversationAsync()` while ensuring the `Activity.From` is null.\n\n\r\n# Changed projects\r\n* Microsoft.Bot.Builder\r\n* integration\r\n* Microsoft.Bot.Builder.Tests\r\n\r\n\r\n\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\"\"\"Middleware Component for logging Activity messages.\"\"\"\n\nfrom typing import Awaitable, Callable, List, Dict\nfrom botbuilder.schema import Activity, ConversationReference, ActivityTypes\nfrom .bot_telemetry_client import BotTelemetryClient\nfrom .bot_assert import BotAssert\nfrom .middleware_set import Middleware\nfrom .null_telemetry_client import NullTelemetryClient\nfrom .turn_context import TurnContext\nfrom .telemetry_constants import TelemetryConstants\nfrom .telemetry_logger_constants import TelemetryLoggerConstants\n\n\n# pylint: disable=line-too-long\nclass TelemetryLoggerMiddleware(Middleware):\n \"\"\"Middleware for logging incoming, outgoing, updated or deleted Activity messages.\"\"\"\n\n def __init__(\n self, telemetry_client: BotTelemetryClient, log_personal_information: bool\n ) -> None:\n super(TelemetryLoggerMiddleware, self).__init__()\n self._telemetry_client = telemetry_client or NullTelemetryClient()\n self._log_personal_information = log_personal_information\n\n @property\n def telemetry_client(self) -> BotTelemetryClient:\n \"\"\"Gets the currently configured BotTelemetryClient.\"\"\"\n return self._telemetry_client\n\n @property\n def log_personal_information(self) -> bool:\n \"\"\" Gets a value indicating whether determines whether to log personal\n information that came from the user.\"\"\"\n return self._log_personal_information\n\n # pylint: disable=arguments-differ\n async def on_turn(\n self, context: TurnContext, logic_fn: Callable[[TurnContext], Awaitable]\n ) -> None:\n \"\"\"Logs events based on incoming and outgoing activities using\n BotTelemetryClient base class\n\n :param turn_context: The context object for this turn.\n :param logic: Callable to continue the bot middleware pipeline\n\n :return: None\n \"\"\"\n BotAssert.context_not_none(context)\n\n # Log incoming activity at beginning of turn\n if context.activity:\n activity = context.activity\n # Log Bot Message Received\n await self.on_receive_activity(activity)\n\n # hook up onSend pipeline\n # pylint: disable=unused-argument\n async def send_activities_handler(\n ctx: TurnContext,\n activities: List[Activity],\n next_send: Callable[[], Awaitable[None]],\n ):\n # Run full pipeline\n responses = await next_send()\n for activity in activities:\n await self.on_send_activity(activity)\n return responses\n\n context.on_send_activities(send_activities_handler)\n\n # hook up update activity pipeline\n async def update_activity_handler(\n ctx: TurnContext, activity: Activity, next_update: Callable[[], Awaitable]\n ):\n # Run full pipeline\n response = await next_update()\n await self.on_update_activity(activity)\n return response\n\n context.on_update_activity(update_activity_handler)\n\n # hook up delete activity pipeline\n async def delete_activity_handler(\n ctx: TurnContext,\n reference: ConversationReference,\n next_delete: Callable[[], Awaitable],\n ):\n # Run full pipeline\n await next_delete()\n\n delete_msg = Activity(\n type=ActivityTypes.message_delete, id=reference.activity_id\n )\n deleted_activity: Activity = TurnContext.apply_conversation_reference(\n delete_msg, reference, False\n )\n await self.on_delete_activity(deleted_activity)\n\n context.on_delete_activity(delete_activity_handler)\n\n if logic_fn:\n await logic_fn()\n\n async def on_receive_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when a message is received from the user.\n Performs logging of telemetry data using the 
BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageReceived\".\n :param activity: Current activity sent from user.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_RECEIVE_EVENT,\n await self.fill_receive_event_properties(activity),\n )\n\n async def on_send_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when the bot sends a message to the user.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageSend\".\n :param activity: Current activity sent from bot.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_SEND_EVENT,\n await self.fill_send_event_properties(activity),\n )\n\n async def on_update_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when the bot updates a message.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageUpdate\".\n :param activity: Current activity sent from user.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_UPDATE_EVENT,\n await self.fill_update_event_properties(activity),\n )\n\n async def on_delete_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when the bot deletes a message.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageDelete\".\n :param activity: Current activity sent from user.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_DELETE_EVENT,\n await self.fill_delete_event_properties(activity),\n )\n\n async def fill_receive_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageReceived.\n Adheres to the LogPersonalInformation flag to filter Name, Text and Speak properties.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to\n BotTelemetryClient.track_event method for the BotMessageReceived event.\n \"\"\"\n properties = {\n TelemetryConstants.FROM_ID_PROPERTY: activity.from_property.id,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n TelemetryConstants.LOCALE_PROPERTY: activity.locale,\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.RECIPIENT_NAME_PROPERTY: activity.from_property.name,\n }\n\n if self.log_personal_information:\n if activity.from_property.name and activity.from_property.name.strip():\n properties[\n TelemetryConstants.FROM_NAME_PROPERTY\n ] = activity.from_property.name\n if activity.text and activity.text.strip():\n properties[TelemetryConstants.TEXT_PROPERTY] = activity.text\n if activity.speak and activity.speak.strip():\n properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n async def fill_send_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageSend.\n These properties are logged when an activity message is sent by the Bot to the user.\n :param activity: 
activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to the\n BotTelemetryClient.track_event method for the BotMessageSend event.\n \"\"\"\n properties = {\n TelemetryConstants.REPLY_ACTIVITY_ID_PROPERTY: activity.reply_to_id,\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n TelemetryConstants.LOCALE_PROPERTY: activity.locale,\n }\n\n # Use the LogPersonalInformation flag to toggle logging PII data, text and user name are common examples\n if self.log_personal_information:\n if activity.from_property.name and activity.from_property.name.strip():\n properties[\n TelemetryConstants.FROM_NAME_PROPERTY\n ] = activity.from_property.name\n if activity.text and activity.text.strip():\n properties[TelemetryConstants.TEXT_PROPERTY] = activity.text\n if activity.speak and activity.speak.strip():\n properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n async def fill_update_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageUpdate.\n These properties are logged when an activity message is updated by the Bot.\n For example, if a card is interacted with by the use, and the card needs\n to be updated to reflect some interaction.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to the\n BotTelemetryClient.track_event method for the BotMessageUpdate event.\n \"\"\"\n properties = {\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.CONVERSATION_ID_PROPERTY: activity.conversation.id,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n TelemetryConstants.LOCALE_PROPERTY: activity.locale,\n }\n\n # Use the LogPersonalInformation flag to toggle logging PII data, text is a common examples\n if self.log_personal_information:\n if activity.text and activity.text.strip():\n properties[TelemetryConstants.TEXT_PROPERTY] = activity.text\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n async def fill_delete_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageDelete.\n These properties are logged when an activity message is deleted by the Bot.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to the\n BotTelemetryClient.track_event method for the BotMessageUpdate event.\n \"\"\"\n properties = {\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.CONVERSATION_ID_PROPERTY: activity.conversation.id,\n 
TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n }\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n", "path": "libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py"}]}
| 3,823 | 310 |
gh_patches_debug_3448
|
rasdani/github-patches
|
git_diff
|
SciTools__cartopy-1245
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SlippyImageArtist cannot be composited
For example, take the WMTS example and add a second layer. Then attempt to save as a PDF.
``` python
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
url = 'http://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi'
layer1 = 'VIIRS_CityLights_2012'
layer2 = 'ASTER_GDEM_Color_Index'
ax = plt.axes(projection=ccrs.PlateCarree())
ax.add_wmts(url, layer1)
ax.add_wmts(url, layer2)
ax.set_extent((-15, 25, 35, 60))
plt.title('Suomi NPP Earth at night April/October 2012')
plt.savefig('test.pdf')
plt.show()
```
which results in:
``` python
Traceback (most recent call last):
File "wmts.py", line 33, in main
plt.savefig('test.pdf')
File "/usr/lib64/python2.7/site-packages/matplotlib/pyplot.py", line 577, in savefig
res = fig.savefig(*args, **kwargs)
File "/usr/lib64/python2.7/site-packages/matplotlib/figure.py", line 1476, in savefig
self.canvas.print_figure(*args, **kwargs)
File "/usr/lib64/python2.7/site-packages/matplotlib/backends/backend_qt5agg.py", line 161, in print_figure
FigureCanvasAgg.print_figure(self, *args, **kwargs)
File "/usr/lib64/python2.7/site-packages/matplotlib/backend_bases.py", line 2211, in print_figure
**kwargs)
File "/usr/lib64/python2.7/site-packages/matplotlib/backends/backend_pdf.py", line 2485, in print_pdf
self.figure.draw(renderer)
File "/usr/lib64/python2.7/site-packages/matplotlib/artist.py", line 59, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/lib64/python2.7/site-packages/matplotlib/figure.py", line 1085, in draw
func(*args)
File "/usr/lib64/python2.7/site-packages/matplotlib/artist.py", line 59, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/lib64/python2.7/site-packages/cartopy/mpl/geoaxes.py", line 359, in draw
inframe=inframe)
File "/usr/lib64/python2.7/site-packages/matplotlib/artist.py", line 59, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/lib64/python2.7/site-packages/matplotlib/axes/_base.py", line 2081, in draw
for z, im in zorder_images]
File "/usr/lib64/python2.7/site-packages/matplotlib/image.py", line 580, in make_image
raise RuntimeError('You must first set the image'
RuntimeError: You must first set the image array or the image attribute
```
I think maybe `SlippyImageArtist` should be overriding `make_image`, too.
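
An alternative, and the route the accompanying patch takes, is to opt the artist out of Matplotlib's image compositing entirely, so each raster layer is rendered on its own. A minimal sketch:

```python
from matplotlib.image import AxesImage


class SlippyImageArtist(AxesImage):
    def can_composite(self):
        # Compositing several slippy raster sources into one image is not
        # supported, so ask Matplotlib to draw this artist separately.
        return False
```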
</issue>
<code>
[start of lib/cartopy/mpl/slippy_image_artist.py]
1 # (C) British Crown Copyright 2014 - 2018, Met Office
2 #
3 # This file is part of cartopy.
4 #
5 # cartopy is free software: you can redistribute it and/or modify it under
6 # the terms of the GNU Lesser General Public License as published by the
7 # Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # cartopy is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public License
16 # along with cartopy. If not, see <https://www.gnu.org/licenses/>.
17 """
18 Define the SlippyImageArtist class, which interfaces with
19 :class:`cartopy.io.RasterSource` instances at draw time, for interactive
20 dragging and zooming of raster data.
21
22 """
23
24 from __future__ import (absolute_import, division, print_function)
25
26 from matplotlib.image import AxesImage
27 import matplotlib.artist
28
29
30 class SlippyImageArtist(AxesImage):
31
32 """
33 A subclass of :class:`~matplotlib.image.AxesImage` which provides an
34 interface for getting a raster from the given object with interactive
35 slippy map type functionality.
36
37 Kwargs are passed to the AxesImage constructor.
38
39 """
40 def __init__(self, ax, raster_source, **kwargs):
41 self.raster_source = raster_source
42 super(SlippyImageArtist, self).__init__(ax, **kwargs)
43 self.set_clip_path(ax.background_patch)
44 self.cache = []
45
46 ax.figure.canvas.mpl_connect('button_press_event', self.on_press)
47 ax.figure.canvas.mpl_connect('button_release_event', self.on_release)
48
49 self.on_release()
50
51 def on_press(self, event=None):
52 self.user_is_interacting = True
53
54 def on_release(self, event=None):
55 self.user_is_interacting = False
56 self.stale = True
57
58 @matplotlib.artist.allow_rasterization
59 def draw(self, renderer, *args, **kwargs):
60 if not self.get_visible():
61 return
62
63 ax = self.axes
64 window_extent = ax.get_window_extent()
65 [x1, y1], [x2, y2] = ax.viewLim.get_points()
66 if not self.user_is_interacting:
67 located_images = self.raster_source.fetch_raster(
68 ax.projection, extent=[x1, x2, y1, y2],
69 target_resolution=(window_extent.width, window_extent.height))
70 self.cache = located_images
71
72 for img, extent in self.cache:
73 self.set_array(img)
74 with ax.hold_limits():
75 self.set_extent(extent)
76 super(SlippyImageArtist, self).draw(renderer, *args, **kwargs)
77
[end of lib/cartopy/mpl/slippy_image_artist.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lib/cartopy/mpl/slippy_image_artist.py b/lib/cartopy/mpl/slippy_image_artist.py
--- a/lib/cartopy/mpl/slippy_image_artist.py
+++ b/lib/cartopy/mpl/slippy_image_artist.py
@@ -74,3 +74,8 @@
with ax.hold_limits():
self.set_extent(extent)
super(SlippyImageArtist, self).draw(renderer, *args, **kwargs)
+
+ def can_composite(self):
+ # As per https://github.com/SciTools/cartopy/issues/689, disable
+ # compositing multiple raster sources.
+ return False
|
{"golden_diff": "diff --git a/lib/cartopy/mpl/slippy_image_artist.py b/lib/cartopy/mpl/slippy_image_artist.py\n--- a/lib/cartopy/mpl/slippy_image_artist.py\n+++ b/lib/cartopy/mpl/slippy_image_artist.py\n@@ -74,3 +74,8 @@\n with ax.hold_limits():\n self.set_extent(extent)\n super(SlippyImageArtist, self).draw(renderer, *args, **kwargs)\n+\n+ def can_composite(self):\n+ # As per https://github.com/SciTools/cartopy/issues/689, disable\n+ # compositing multiple raster sources.\n+ return False\n", "issue": "SlippyImageArtist cannot be composited\nFor example, take the WMTS example and add a second layer. Then attempt to save as a PDF.\n\n``` python\nimport cartopy.crs as ccrs\nimport matplotlib.pyplot as plt\n\nurl = 'http://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi'\nlayer1 = 'VIIRS_CityLights_2012'\nlayer2 = 'ASTER_GDEM_Color_Index'\n\nax = plt.axes(projection=ccrs.PlateCarree())\nax.add_wmts(url, layer1)\nax.add_wmts(url, layer2)\nax.set_extent((-15, 25, 35, 60))\n\nplt.title('Suomi NPP Earth at night April/October 2012')\nplt.savefig('test.pdf')\nplt.show()\n```\n\nwhich results in:\n\n``` python\nTraceback (most recent call last):\n File \"wmts.py\", line 33, in main\n plt.savefig('test.pdf')\n File \"/usr/lib64/python2.7/site-packages/matplotlib/pyplot.py\", line 577, in savefig\n res = fig.savefig(*args, **kwargs)\n File \"/usr/lib64/python2.7/site-packages/matplotlib/figure.py\", line 1476, in savefig\n self.canvas.print_figure(*args, **kwargs)\n File \"/usr/lib64/python2.7/site-packages/matplotlib/backends/backend_qt5agg.py\", line 161, in print_figure\n FigureCanvasAgg.print_figure(self, *args, **kwargs)\n File \"/usr/lib64/python2.7/site-packages/matplotlib/backend_bases.py\", line 2211, in print_figure\n **kwargs)\n File \"/usr/lib64/python2.7/site-packages/matplotlib/backends/backend_pdf.py\", line 2485, in print_pdf\n self.figure.draw(renderer)\n File \"/usr/lib64/python2.7/site-packages/matplotlib/artist.py\", line 59, in draw_wrapper\n draw(artist, renderer, *args, **kwargs)\n File \"/usr/lib64/python2.7/site-packages/matplotlib/figure.py\", line 1085, in draw\n func(*args)\n File \"/usr/lib64/python2.7/site-packages/matplotlib/artist.py\", line 59, in draw_wrapper\n draw(artist, renderer, *args, **kwargs)\n File \"/usr/lib64/python2.7/site-packages/cartopy/mpl/geoaxes.py\", line 359, in draw\n inframe=inframe)\n File \"/usr/lib64/python2.7/site-packages/matplotlib/artist.py\", line 59, in draw_wrapper\n draw(artist, renderer, *args, **kwargs)\n File \"/usr/lib64/python2.7/site-packages/matplotlib/axes/_base.py\", line 2081, in draw\n for z, im in zorder_images]\n File \"/usr/lib64/python2.7/site-packages/matplotlib/image.py\", line 580, in make_image\n raise RuntimeError('You must first set the image'\nRuntimeError: You must first set the image array or the image attribute\n```\n\nI think maybe `SlippyImageArtist` should be overriding `make_image`, too.\n\n", "before_files": [{"content": "# (C) British Crown Copyright 2014 - 2018, Met Office\n#\n# This file is part of cartopy.\n#\n# cartopy is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License as published by the\n# Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cartopy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with cartopy. If not, see <https://www.gnu.org/licenses/>.\n\"\"\"\nDefine the SlippyImageArtist class, which interfaces with\n:class:`cartopy.io.RasterSource` instances at draw time, for interactive\ndragging and zooming of raster data.\n\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function)\n\nfrom matplotlib.image import AxesImage\nimport matplotlib.artist\n\n\nclass SlippyImageArtist(AxesImage):\n\n \"\"\"\n A subclass of :class:`~matplotlib.image.AxesImage` which provides an\n interface for getting a raster from the given object with interactive\n slippy map type functionality.\n\n Kwargs are passed to the AxesImage constructor.\n\n \"\"\"\n def __init__(self, ax, raster_source, **kwargs):\n self.raster_source = raster_source\n super(SlippyImageArtist, self).__init__(ax, **kwargs)\n self.set_clip_path(ax.background_patch)\n self.cache = []\n\n ax.figure.canvas.mpl_connect('button_press_event', self.on_press)\n ax.figure.canvas.mpl_connect('button_release_event', self.on_release)\n\n self.on_release()\n\n def on_press(self, event=None):\n self.user_is_interacting = True\n\n def on_release(self, event=None):\n self.user_is_interacting = False\n self.stale = True\n\n @matplotlib.artist.allow_rasterization\n def draw(self, renderer, *args, **kwargs):\n if not self.get_visible():\n return\n\n ax = self.axes\n window_extent = ax.get_window_extent()\n [x1, y1], [x2, y2] = ax.viewLim.get_points()\n if not self.user_is_interacting:\n located_images = self.raster_source.fetch_raster(\n ax.projection, extent=[x1, x2, y1, y2],\n target_resolution=(window_extent.width, window_extent.height))\n self.cache = located_images\n\n for img, extent in self.cache:\n self.set_array(img)\n with ax.hold_limits():\n self.set_extent(extent)\n super(SlippyImageArtist, self).draw(renderer, *args, **kwargs)\n", "path": "lib/cartopy/mpl/slippy_image_artist.py"}]}
| 2,043 | 143 |
gh_patches_debug_40072
|
rasdani/github-patches
|
git_diff
|
pyqtgraph__pyqtgraph-1586
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RawImageWidget ignores imageAxisOrder config option
### Short description
RawImageWidget ignores imageAxisOrder whereas RawImageGLWidget respects it.
In the included example below, the user needs to manually transpose the image.
### Code to reproduce
```python
import scipy.misc
import pyqtgraph as pg
from pyqtgraph.widgets.RawImageWidget import RawImageWidget, RawImageGLWidget
pg.setConfigOptions(imageAxisOrder='row-major')
data = scipy.misc.face()
app = pg.mkQApp()
win = pg.Qt.QtWidgets.QMainWindow()
if 1:
wgt = RawImageWidget(scaled=True)
# data = data.transpose(1,0,2) # needs a manual transpose
else:
wgt = RawImageGLWidget()
wgt.setImage(data)
win.setCentralWidget(wgt)
win.resize(800, 600)
win.show()
app.exec_()
```
### Expected behavior
<!-- What should happen? -->
Image displayed right-side up
### Real behavior
<!-- What happens? -->
Image displayed transposed
### Tested environment(s)
* PyQtGraph version: 0.11.1.dev0
* Qt Python binding: PyQt5 5.15.2 Qt 5.15.2
* Python version: 3.7.9
* NumPy version: 1.19.4
* Operating system: Windows 10 x64
* Installation method: pip install -e .
### Additional context
None
</issue>
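
For context, the symptom traces back to `RawImageWidget.paintEvent()` handing the array straight to `functions.makeARGB()` without consulting `getConfigOption('imageAxisOrder')`, whereas `RawImageGLWidget.uploadTexture()` transposes column-major data before upload. Until the widget itself is fixed, a caller-side workaround is possible; the sketch below is illustrative only — the helper name and wrapper are assumptions, though `pg.getConfigOption` is the real pyqtgraph config hook.

```python
import pyqtgraph as pg


def set_image_respecting_axis_order(widget, img, *args, **kwargs):
    """Hypothetical helper: transpose row-major data before a column-major-only widget sees it."""
    if pg.getConfigOption('imageAxisOrder') == 'row-major':
        img = img.swapaxes(0, 1)  # (rows, cols[, ch]) -> (cols, rows[, ch])
    widget.setImage(img, *args, **kwargs)
```
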
<code>
[start of pyqtgraph/widgets/RawImageWidget.py]
1 # -*- coding: utf-8 -*-
2 """
3 RawImageWidget.py
4 Copyright 2010-2016 Luke Campagnola
5 Distributed under MIT/X11 license. See license.txt for more infomation.
6 """
7
8 from .. import getConfigOption, functions as fn, getCupy
9 from ..Qt import QtCore, QtGui
10
11 try:
12 from ..Qt import QtWidgets
13 from OpenGL.GL import *
14
15 HAVE_OPENGL = True
16 except (ImportError, AttributeError):
17 # Would prefer `except ImportError` here, but some versions of pyopengl generate
18 # AttributeError upon import
19 HAVE_OPENGL = False
20
21
22 class RawImageWidget(QtGui.QWidget):
23 """
24 Widget optimized for very fast video display.
25 Generally using an ImageItem inside GraphicsView is fast enough.
26 On some systems this may provide faster video. See the VideoSpeedTest example for benchmarking.
27 """
28
29 def __init__(self, parent=None, scaled=False):
30 """
31 Setting scaled=True will cause the entire image to be displayed within the boundaries of the widget.
32 This also greatly reduces the speed at which it will draw frames.
33 """
34 QtGui.QWidget.__init__(self, parent)
35 self.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding))
36 self.scaled = scaled
37 self.opts = None
38 self.image = None
39 self._cp = getCupy()
40
41 def setImage(self, img, *args, **kargs):
42 """
43 img must be ndarray of shape (x,y), (x,y,3), or (x,y,4).
44 Extra arguments are sent to functions.makeARGB
45 """
46 self.opts = (img, args, kargs)
47 self.image = None
48 self.update()
49
50 def paintEvent(self, ev):
51 if self.opts is None:
52 return
53 if self.image is None:
54 argb, alpha = fn.makeARGB(self.opts[0], *self.opts[1], **self.opts[2])
55 if self._cp and self._cp.get_array_module(argb) == self._cp:
56 argb = argb.get() # transfer GPU data back to the CPU
57 self.image = fn.makeQImage(argb, alpha)
58 self.opts = ()
59 # if self.pixmap is None:
60 # self.pixmap = QtGui.QPixmap.fromImage(self.image)
61 p = QtGui.QPainter(self)
62 if self.scaled:
63 rect = self.rect()
64 ar = rect.width() / float(rect.height())
65 imar = self.image.width() / float(self.image.height())
66 if ar > imar:
67 rect.setWidth(int(rect.width() * imar / ar))
68 else:
69 rect.setHeight(int(rect.height() * ar / imar))
70
71 p.drawImage(rect, self.image)
72 else:
73 p.drawImage(QtCore.QPointF(), self.image)
74 # p.drawPixmap(self.rect(), self.pixmap)
75 p.end()
76
77
78 if HAVE_OPENGL:
79 class RawImageGLWidget(QtWidgets.QOpenGLWidget):
80 """
81 Similar to RawImageWidget, but uses a GL widget to do all drawing.
82 Perfomance varies between platforms; see examples/VideoSpeedTest for benchmarking.
83
84 Checks if setConfigOptions(imageAxisOrder='row-major') was set.
85 """
86
87 def __init__(self, parent=None, scaled=False):
88 QtWidgets.QOpenGLWidget.__init__(self, parent)
89 self.scaled = scaled
90 self.image = None
91 self.uploaded = False
92 self.smooth = False
93 self.opts = None
94 self.row_major = getConfigOption('imageAxisOrder') == 'row-major'
95
96 def setImage(self, img, *args, **kargs):
97 """
98 img must be ndarray of shape (x,y), (x,y,3), or (x,y,4).
99 Extra arguments are sent to functions.makeARGB
100 """
101 self.opts = (img, args, kargs)
102 self.image = None
103 self.uploaded = False
104 self.update()
105
106 def initializeGL(self):
107 self.texture = glGenTextures(1)
108
109 def uploadTexture(self):
110 glEnable(GL_TEXTURE_2D)
111 glBindTexture(GL_TEXTURE_2D, self.texture)
112 if self.smooth:
113 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
114 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
115 else:
116 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
117 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
118 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER)
119 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER)
120 # glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_BORDER)
121
122 if self.row_major:
123 image = self.image
124 else:
125 image = self.image.transpose((1, 0, 2))
126
127 # ## Test texture dimensions first
128 # shape = self.image.shape
129 # glTexImage2D(GL_PROXY_TEXTURE_2D, 0, GL_RGBA, shape[0], shape[1], 0, GL_RGBA, GL_UNSIGNED_BYTE, None)
130 # if glGetTexLevelParameteriv(GL_PROXY_TEXTURE_2D, 0, GL_TEXTURE_WIDTH) == 0:
131 # raise Exception("OpenGL failed to create 2D texture (%dx%d); too large for this hardware." % shape[:2])
132
133 glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, image.shape[1], image.shape[0], 0, GL_RGBA, GL_UNSIGNED_BYTE, image)
134 glDisable(GL_TEXTURE_2D)
135
136 def paintGL(self):
137 glClear(GL_COLOR_BUFFER_BIT)
138
139 if self.image is None:
140 if self.opts is None:
141 return
142 img, args, kwds = self.opts
143 kwds['useRGBA'] = True
144 self.image, alpha = fn.makeARGB(img, *args, **kwds)
145
146 if not self.uploaded:
147 self.uploadTexture()
148
149 glEnable(GL_TEXTURE_2D)
150 glBindTexture(GL_TEXTURE_2D, self.texture)
151 glColor4f(1, 1, 1, 1)
152
153 glBegin(GL_QUADS)
154 glTexCoord2f(0, 1)
155 glVertex3f(-1, -1, 0)
156 glTexCoord2f(1, 1)
157 glVertex3f(1, -1, 0)
158 glTexCoord2f(1, 0)
159 glVertex3f(1, 1, 0)
160 glTexCoord2f(0, 0)
161 glVertex3f(-1, 1, 0)
162 glEnd()
163 glDisable(GL_TEXTURE_3D)
164
[end of pyqtgraph/widgets/RawImageWidget.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pyqtgraph/widgets/RawImageWidget.py b/pyqtgraph/widgets/RawImageWidget.py
--- a/pyqtgraph/widgets/RawImageWidget.py
+++ b/pyqtgraph/widgets/RawImageWidget.py
@@ -43,6 +43,8 @@
img must be ndarray of shape (x,y), (x,y,3), or (x,y,4).
Extra arguments are sent to functions.makeARGB
"""
+ if getConfigOption('imageAxisOrder') == 'row-major':
+ img = img.transpose((1, 0, 2))
self.opts = (img, args, kargs)
self.image = None
self.update()
@@ -79,7 +81,7 @@
class RawImageGLWidget(QtWidgets.QOpenGLWidget):
"""
Similar to RawImageWidget, but uses a GL widget to do all drawing.
- Perfomance varies between platforms; see examples/VideoSpeedTest for benchmarking.
+ Performance varies between platforms; see examples/VideoSpeedTest for benchmarking.
Checks if setConfigOptions(imageAxisOrder='row-major') was set.
"""
@@ -91,7 +93,6 @@
self.uploaded = False
self.smooth = False
self.opts = None
- self.row_major = getConfigOption('imageAxisOrder') == 'row-major'
def setImage(self, img, *args, **kargs):
"""
@@ -118,20 +119,20 @@
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER)
# glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_BORDER)
-
- if self.row_major:
+ if getConfigOption('imageAxisOrder') == 'row-major':
image = self.image
else:
image = self.image.transpose((1, 0, 2))
- # ## Test texture dimensions first
+ ## Test texture dimensions first
# shape = self.image.shape
# glTexImage2D(GL_PROXY_TEXTURE_2D, 0, GL_RGBA, shape[0], shape[1], 0, GL_RGBA, GL_UNSIGNED_BYTE, None)
# if glGetTexLevelParameteriv(GL_PROXY_TEXTURE_2D, 0, GL_TEXTURE_WIDTH) == 0:
- # raise Exception("OpenGL failed to create 2D texture (%dx%d); too large for this hardware." % shape[:2])
+ # raise Exception("OpenGL failed to create 2D texture (%dx%d); too large for this hardware." % shape[:2])
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, image.shape[1], image.shape[0], 0, GL_RGBA, GL_UNSIGNED_BYTE, image)
glDisable(GL_TEXTURE_2D)
+ self.uploaded = True
def paintGL(self):
glClear(GL_COLOR_BUFFER_BIT)
|
{"golden_diff": "diff --git a/pyqtgraph/widgets/RawImageWidget.py b/pyqtgraph/widgets/RawImageWidget.py\n--- a/pyqtgraph/widgets/RawImageWidget.py\n+++ b/pyqtgraph/widgets/RawImageWidget.py\n@@ -43,6 +43,8 @@\n img must be ndarray of shape (x,y), (x,y,3), or (x,y,4).\n Extra arguments are sent to functions.makeARGB\n \"\"\"\n+ if getConfigOption('imageAxisOrder') == 'row-major':\n+ img = img.transpose((1, 0, 2))\n self.opts = (img, args, kargs)\n self.image = None\n self.update()\n@@ -79,7 +81,7 @@\n class RawImageGLWidget(QtWidgets.QOpenGLWidget):\n \"\"\"\n Similar to RawImageWidget, but uses a GL widget to do all drawing.\n- Perfomance varies between platforms; see examples/VideoSpeedTest for benchmarking.\n+ Performance varies between platforms; see examples/VideoSpeedTest for benchmarking.\n \n Checks if setConfigOptions(imageAxisOrder='row-major') was set.\n \"\"\"\n@@ -91,7 +93,6 @@\n self.uploaded = False\n self.smooth = False\n self.opts = None\n- self.row_major = getConfigOption('imageAxisOrder') == 'row-major'\n \n def setImage(self, img, *args, **kargs):\n \"\"\"\n@@ -118,20 +119,20 @@\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER)\n # glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_BORDER)\n-\n- if self.row_major:\n+ if getConfigOption('imageAxisOrder') == 'row-major':\n image = self.image\n else:\n image = self.image.transpose((1, 0, 2))\n \n- # ## Test texture dimensions first\n+ ## Test texture dimensions first\n # shape = self.image.shape\n # glTexImage2D(GL_PROXY_TEXTURE_2D, 0, GL_RGBA, shape[0], shape[1], 0, GL_RGBA, GL_UNSIGNED_BYTE, None)\n # if glGetTexLevelParameteriv(GL_PROXY_TEXTURE_2D, 0, GL_TEXTURE_WIDTH) == 0:\n- # raise Exception(\"OpenGL failed to create 2D texture (%dx%d); too large for this hardware.\" % shape[:2])\n+ # raise Exception(\"OpenGL failed to create 2D texture (%dx%d); too large for this hardware.\" % shape[:2])\n \n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, image.shape[1], image.shape[0], 0, GL_RGBA, GL_UNSIGNED_BYTE, image)\n glDisable(GL_TEXTURE_2D)\n+ self.uploaded = True\n \n def paintGL(self):\n glClear(GL_COLOR_BUFFER_BIT)\n", "issue": "RawImageWidget ignores imageAxisOrder config option\n### Short description\r\nRawImageWidget ignores imageAxisOrder whereas RawImageGLWidget respects it.\r\nIn the included example below, the user needs to manually transpose the image.\r\n\r\n### Code to reproduce\r\n```python\r\nimport scipy.misc\r\nimport pyqtgraph as pg\r\nfrom pyqtgraph.widgets.RawImageWidget import RawImageWidget, RawImageGLWidget\r\n\r\npg.setConfigOptions(imageAxisOrder='row-major')\r\n\r\ndata = scipy.misc.face()\r\n\r\napp = pg.mkQApp()\r\nwin = pg.Qt.QtWidgets.QMainWindow()\r\nif 1:\r\n wgt = RawImageWidget(scaled=True)\r\n # data = data.transpose(1,0,2) # needs a manual transpose\r\nelse:\r\n wgt = RawImageGLWidget()\r\nwgt.setImage(data)\r\nwin.setCentralWidget(wgt)\r\nwin.resize(800, 600)\r\nwin.show()\r\napp.exec_()\r\n```\r\n\r\n### Expected behavior\r\n<!-- What should happen? -->\r\nImage displayed right-side up\r\n\r\n### Real behavior\r\n<!-- What happens? 
-->\r\nImage displayed transposed\r\n\r\n### Tested environment(s)\r\n\r\n * PyQtGraph version: 0.11.1.dev0\r\n * Qt Python binding: PyQt5 5.15.2 Qt 5.15.2\r\n * Python version: 3.7.9\r\n * NumPy version: 1.19.4\r\n * Operating system: Windows 10 x64\r\n * Installation method: pip install -e .\r\n\r\n### Additional context\r\nNone\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nRawImageWidget.py\nCopyright 2010-2016 Luke Campagnola\nDistributed under MIT/X11 license. See license.txt for more infomation.\n\"\"\"\n\nfrom .. import getConfigOption, functions as fn, getCupy\nfrom ..Qt import QtCore, QtGui\n\ntry:\n from ..Qt import QtWidgets\n from OpenGL.GL import *\n\n HAVE_OPENGL = True\nexcept (ImportError, AttributeError):\n # Would prefer `except ImportError` here, but some versions of pyopengl generate\n # AttributeError upon import\n HAVE_OPENGL = False\n\n\nclass RawImageWidget(QtGui.QWidget):\n \"\"\"\n Widget optimized for very fast video display.\n Generally using an ImageItem inside GraphicsView is fast enough.\n On some systems this may provide faster video. See the VideoSpeedTest example for benchmarking.\n \"\"\"\n\n def __init__(self, parent=None, scaled=False):\n \"\"\"\n Setting scaled=True will cause the entire image to be displayed within the boundaries of the widget.\n This also greatly reduces the speed at which it will draw frames.\n \"\"\"\n QtGui.QWidget.__init__(self, parent)\n self.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding))\n self.scaled = scaled\n self.opts = None\n self.image = None\n self._cp = getCupy()\n\n def setImage(self, img, *args, **kargs):\n \"\"\"\n img must be ndarray of shape (x,y), (x,y,3), or (x,y,4).\n Extra arguments are sent to functions.makeARGB\n \"\"\"\n self.opts = (img, args, kargs)\n self.image = None\n self.update()\n\n def paintEvent(self, ev):\n if self.opts is None:\n return\n if self.image is None:\n argb, alpha = fn.makeARGB(self.opts[0], *self.opts[1], **self.opts[2])\n if self._cp and self._cp.get_array_module(argb) == self._cp:\n argb = argb.get() # transfer GPU data back to the CPU\n self.image = fn.makeQImage(argb, alpha)\n self.opts = ()\n # if self.pixmap is None:\n # self.pixmap = QtGui.QPixmap.fromImage(self.image)\n p = QtGui.QPainter(self)\n if self.scaled:\n rect = self.rect()\n ar = rect.width() / float(rect.height())\n imar = self.image.width() / float(self.image.height())\n if ar > imar:\n rect.setWidth(int(rect.width() * imar / ar))\n else:\n rect.setHeight(int(rect.height() * ar / imar))\n\n p.drawImage(rect, self.image)\n else:\n p.drawImage(QtCore.QPointF(), self.image)\n # p.drawPixmap(self.rect(), self.pixmap)\n p.end()\n\n\nif HAVE_OPENGL:\n class RawImageGLWidget(QtWidgets.QOpenGLWidget):\n \"\"\"\n Similar to RawImageWidget, but uses a GL widget to do all drawing.\n Perfomance varies between platforms; see examples/VideoSpeedTest for benchmarking.\n\n Checks if setConfigOptions(imageAxisOrder='row-major') was set.\n \"\"\"\n\n def __init__(self, parent=None, scaled=False):\n QtWidgets.QOpenGLWidget.__init__(self, parent)\n self.scaled = scaled\n self.image = None\n self.uploaded = False\n self.smooth = False\n self.opts = None\n self.row_major = getConfigOption('imageAxisOrder') == 'row-major'\n\n def setImage(self, img, *args, **kargs):\n \"\"\"\n img must be ndarray of shape (x,y), (x,y,3), or (x,y,4).\n Extra arguments are sent to functions.makeARGB\n \"\"\"\n self.opts = (img, args, kargs)\n self.image = None\n self.uploaded = False\n 
self.update()\n\n def initializeGL(self):\n self.texture = glGenTextures(1)\n\n def uploadTexture(self):\n glEnable(GL_TEXTURE_2D)\n glBindTexture(GL_TEXTURE_2D, self.texture)\n if self.smooth:\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n else:\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER)\n # glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_BORDER)\n\n if self.row_major:\n image = self.image\n else:\n image = self.image.transpose((1, 0, 2))\n\n # ## Test texture dimensions first\n # shape = self.image.shape\n # glTexImage2D(GL_PROXY_TEXTURE_2D, 0, GL_RGBA, shape[0], shape[1], 0, GL_RGBA, GL_UNSIGNED_BYTE, None)\n # if glGetTexLevelParameteriv(GL_PROXY_TEXTURE_2D, 0, GL_TEXTURE_WIDTH) == 0:\n # raise Exception(\"OpenGL failed to create 2D texture (%dx%d); too large for this hardware.\" % shape[:2])\n\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, image.shape[1], image.shape[0], 0, GL_RGBA, GL_UNSIGNED_BYTE, image)\n glDisable(GL_TEXTURE_2D)\n\n def paintGL(self):\n glClear(GL_COLOR_BUFFER_BIT)\n\n if self.image is None:\n if self.opts is None:\n return\n img, args, kwds = self.opts\n kwds['useRGBA'] = True\n self.image, alpha = fn.makeARGB(img, *args, **kwds)\n\n if not self.uploaded:\n self.uploadTexture()\n\n glEnable(GL_TEXTURE_2D)\n glBindTexture(GL_TEXTURE_2D, self.texture)\n glColor4f(1, 1, 1, 1)\n\n glBegin(GL_QUADS)\n glTexCoord2f(0, 1)\n glVertex3f(-1, -1, 0)\n glTexCoord2f(1, 1)\n glVertex3f(1, -1, 0)\n glTexCoord2f(1, 0)\n glVertex3f(1, 1, 0)\n glTexCoord2f(0, 0)\n glVertex3f(-1, 1, 0)\n glEnd()\n glDisable(GL_TEXTURE_3D)\n", "path": "pyqtgraph/widgets/RawImageWidget.py"}]}
| 2,706 | 655 |
gh_patches_debug_19761
|
rasdani/github-patches
|
git_diff
|
sopel-irc__sopel-1381
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ctrl-C doesn't run shutdown routines
When pressing <kbd>Ctrl</kbd>-<kbd>C</kbd> to interrupt a Sopel instance running in the foreground of an active shell, Sopel simply prints `KeyboardInterrupt` and exits, seemingly without calling any of the shutdown routines.
Pressing <kbd>Ctrl</kbd>-<kbd>C</kbd> should behave more or less the same as `sopel --quit`.
Discovered while testing for #1369.
</issue>
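
For context, `run()` below already installs a handler for SIGUSR1/SIGTERM that calls `p.quit('Closing')`, while Ctrl-C raises SIGINT and falls through to the bare `except KeyboardInterrupt: break`, skipping that quit path. The snippet below is a generic, standalone illustration of routing SIGINT through the same handler as SIGTERM so cleanup runs on Ctrl-C; it is not Sopel's code, and the handler body is a stand-in.

```python
import signal
import sys


def handle_quit(sig, frame):
    print('Got quit signal, shutting down.')  # stand-in for p.quit('Closing')
    sys.exit(0)


for sig in (signal.SIGINT, signal.SIGTERM):
    signal.signal(sig, handle_quit)

signal.pause()  # block until a signal arrives (POSIX only)
```
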
<code>
[start of sopel/__init__.py]
1 # coding=utf-8
2 # ASCII ONLY IN THIS FILE THOUGH!!!!!!!
3 # Python does some stupid bullshit of respecting LC_ALL over the encoding on the
4 # file, so in order to undo Python's ridiculous fucking idiocy, we have to have
5 # our own check.
6
7 # Copyright 2008, Sean B. Palmer, inamidst.com
8 # Copyright 2012, Elsie Powell, http://embolalia.com
9 # Copyright 2012, Elad Alfassa <[email protected]>
10 #
11 # Licensed under the Eiffel Forum License 2.
12
13 from __future__ import unicode_literals, absolute_import, print_function, division
14
15 import locale
16 import sys
17 loc = locale.getlocale()
18 if sys.version_info.major > 2:
19 if not loc[1] or 'UTF-8' not in loc[1]:
20 print('WARNING!!! You are running with a non-UTF8 locale environment '
21 'variables (e.g. LC_ALL is set to "C"), which makes Python 3 do '
22 'stupid things. If you get strange errors, please set it to '
23 'something like "en_US.UTF-8".', file=sys.stderr)
24
25
26 from collections import namedtuple
27 import os
28 import re
29 import time
30 import traceback
31 import signal
32
33 __version__ = '6.5.3'
34
35
36 def _version_info(version=__version__):
37 regex = re.compile(r'(\d+)\.(\d+)\.(\d+)(?:(a|b|rc)(\d+))?.*')
38 version_groups = regex.match(__version__).groups()
39 major, minor, micro = (int(piece) for piece in version_groups[0:3])
40 level = version_groups[3]
41 serial = int(version_groups[4] or 0)
42 if level == 'a':
43 level = 'alpha'
44 elif level == 'b':
45 level = 'beta'
46 elif level == 'rc':
47 level = 'candidate'
48 elif not level and version_groups[4] is None:
49 level = 'final'
50 else:
51 level = 'alpha'
52 version_type = namedtuple('version_info',
53 'major, minor, micro, releaselevel, serial')
54 return version_type(major, minor, micro, level, serial)
55
56
57 version_info = _version_info()
58
59
60 def run(config, pid_file, daemon=False):
61 import sopel.bot as bot
62 import sopel.logger
63 from sopel.tools import stderr
64 delay = 20
65 # Inject ca_certs from config to web for SSL validation of web requests
66 if not config.core.ca_certs:
67 stderr('Could not open CA certificates file. SSL will not '
68 'work properly.')
69
70 def signal_handler(sig, frame):
71 if sig == signal.SIGUSR1 or sig == signal.SIGTERM:
72 stderr('Got quit signal, shutting down.')
73 p.quit('Closing')
74 while True:
75 try:
76 p = bot.Sopel(config, daemon=daemon)
77 if hasattr(signal, 'SIGUSR1'):
78 signal.signal(signal.SIGUSR1, signal_handler)
79 if hasattr(signal, 'SIGTERM'):
80 signal.signal(signal.SIGTERM, signal_handler)
81 sopel.logger.setup_logging(p)
82 p.run(config.core.host, int(config.core.port))
83 except KeyboardInterrupt:
84 break
85 except Exception: # TODO: Be specific
86 trace = traceback.format_exc()
87 try:
88 stderr(trace)
89 except Exception: # TODO: Be specific
90 pass
91 logfile = open(os.path.join(config.core.logdir, 'exceptions.log'), 'a')
92 logfile.write('Critical exception in core')
93 logfile.write(trace)
94 logfile.write('----------------------------------------\n\n')
95 logfile.close()
96 os.unlink(pid_file)
97 os._exit(1)
98
99 if not isinstance(delay, int):
100 break
101 if p.hasquit:
102 break
103 stderr('Warning: Disconnected. Reconnecting in %s seconds...' % delay)
104 time.sleep(delay)
105 os.unlink(pid_file)
106 os._exit(0)
107
[end of sopel/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sopel/__init__.py b/sopel/__init__.py
--- a/sopel/__init__.py
+++ b/sopel/__init__.py
@@ -68,7 +68,7 @@
'work properly.')
def signal_handler(sig, frame):
- if sig == signal.SIGUSR1 or sig == signal.SIGTERM:
+ if sig == signal.SIGUSR1 or sig == signal.SIGTERM or sig == signal.SIGINT:
stderr('Got quit signal, shutting down.')
p.quit('Closing')
while True:
@@ -78,6 +78,8 @@
signal.signal(signal.SIGUSR1, signal_handler)
if hasattr(signal, 'SIGTERM'):
signal.signal(signal.SIGTERM, signal_handler)
+ if hasattr(signal, 'SIGINT'):
+ signal.signal(signal.SIGINT, signal_handler)
sopel.logger.setup_logging(p)
p.run(config.core.host, int(config.core.port))
except KeyboardInterrupt:
|
{"golden_diff": "diff --git a/sopel/__init__.py b/sopel/__init__.py\n--- a/sopel/__init__.py\n+++ b/sopel/__init__.py\n@@ -68,7 +68,7 @@\n 'work properly.')\n \n def signal_handler(sig, frame):\n- if sig == signal.SIGUSR1 or sig == signal.SIGTERM:\n+ if sig == signal.SIGUSR1 or sig == signal.SIGTERM or sig == signal.SIGINT:\n stderr('Got quit signal, shutting down.')\n p.quit('Closing')\n while True:\n@@ -78,6 +78,8 @@\n signal.signal(signal.SIGUSR1, signal_handler)\n if hasattr(signal, 'SIGTERM'):\n signal.signal(signal.SIGTERM, signal_handler)\n+ if hasattr(signal, 'SIGINT'):\n+ signal.signal(signal.SIGINT, signal_handler)\n sopel.logger.setup_logging(p)\n p.run(config.core.host, int(config.core.port))\n except KeyboardInterrupt:\n", "issue": "Ctrl-C doesn't run shutdown routines\nWhen pressing <kbd>Ctrl</kbd>-<kbd>C</kbd> to interrupt a Sopel instance running in the foreground of an active shell, it simply prints `KeyboardInterrupt` and exits seemingly without calling any of the shutdown routines.\r\n\r\nPressing <kbd>Ctrl</kbd>-<kbd>C</kbd> should behave more or less the same as `sopel --quit`.\r\n\r\nDiscovered while testing for #1369.\n", "before_files": [{"content": "# coding=utf-8\n# ASCII ONLY IN THIS FILE THOUGH!!!!!!!\n# Python does some stupid bullshit of respecting LC_ALL over the encoding on the\n# file, so in order to undo Python's ridiculous fucking idiocy, we have to have\n# our own check.\n\n# Copyright 2008, Sean B. Palmer, inamidst.com\n# Copyright 2012, Elsie Powell, http://embolalia.com\n# Copyright 2012, Elad Alfassa <[email protected]>\n#\n# Licensed under the Eiffel Forum License 2.\n\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport locale\nimport sys\nloc = locale.getlocale()\nif sys.version_info.major > 2:\n if not loc[1] or 'UTF-8' not in loc[1]:\n print('WARNING!!! You are running with a non-UTF8 locale environment '\n 'variables (e.g. LC_ALL is set to \"C\"), which makes Python 3 do '\n 'stupid things. If you get strange errors, please set it to '\n 'something like \"en_US.UTF-8\".', file=sys.stderr)\n\n\nfrom collections import namedtuple\nimport os\nimport re\nimport time\nimport traceback\nimport signal\n\n__version__ = '6.5.3'\n\n\ndef _version_info(version=__version__):\n regex = re.compile(r'(\\d+)\\.(\\d+)\\.(\\d+)(?:(a|b|rc)(\\d+))?.*')\n version_groups = regex.match(__version__).groups()\n major, minor, micro = (int(piece) for piece in version_groups[0:3])\n level = version_groups[3]\n serial = int(version_groups[4] or 0)\n if level == 'a':\n level = 'alpha'\n elif level == 'b':\n level = 'beta'\n elif level == 'rc':\n level = 'candidate'\n elif not level and version_groups[4] is None:\n level = 'final'\n else:\n level = 'alpha'\n version_type = namedtuple('version_info',\n 'major, minor, micro, releaselevel, serial')\n return version_type(major, minor, micro, level, serial)\n\n\nversion_info = _version_info()\n\n\ndef run(config, pid_file, daemon=False):\n import sopel.bot as bot\n import sopel.logger\n from sopel.tools import stderr\n delay = 20\n # Inject ca_certs from config to web for SSL validation of web requests\n if not config.core.ca_certs:\n stderr('Could not open CA certificates file. 
SSL will not '\n 'work properly.')\n\n def signal_handler(sig, frame):\n if sig == signal.SIGUSR1 or sig == signal.SIGTERM:\n stderr('Got quit signal, shutting down.')\n p.quit('Closing')\n while True:\n try:\n p = bot.Sopel(config, daemon=daemon)\n if hasattr(signal, 'SIGUSR1'):\n signal.signal(signal.SIGUSR1, signal_handler)\n if hasattr(signal, 'SIGTERM'):\n signal.signal(signal.SIGTERM, signal_handler)\n sopel.logger.setup_logging(p)\n p.run(config.core.host, int(config.core.port))\n except KeyboardInterrupt:\n break\n except Exception: # TODO: Be specific\n trace = traceback.format_exc()\n try:\n stderr(trace)\n except Exception: # TODO: Be specific\n pass\n logfile = open(os.path.join(config.core.logdir, 'exceptions.log'), 'a')\n logfile.write('Critical exception in core')\n logfile.write(trace)\n logfile.write('----------------------------------------\\n\\n')\n logfile.close()\n os.unlink(pid_file)\n os._exit(1)\n\n if not isinstance(delay, int):\n break\n if p.hasquit:\n break\n stderr('Warning: Disconnected. Reconnecting in %s seconds...' % delay)\n time.sleep(delay)\n os.unlink(pid_file)\n os._exit(0)\n", "path": "sopel/__init__.py"}]}
| 1,724 | 211 |
gh_patches_debug_30636
|
rasdani/github-patches
|
git_diff
|
ciudadanointeligente__votainteligente-portal-electoral-260
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Extra info is the same for all candidates
When using extra_info fields such as ribbon or colors, the same values are returned for all candidates in views such as the election page or the soul-mate JSON.\nThe correct value is only returned when information for a single candidate is requested, such as on the candidate detail page.\n
</issue>
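
Symptoms like this usually point at a mutable default shared at class level: `default_extra_info` is a single dict object, so merging per-instance values into it leaks one candidate's data into every other instance. The standalone example below illustrates the pitfall and the per-instance-copy fix; the class and field names are generic, not taken from the project.

```python
import copy


class Shared(object):
    default_extra_info = {'ribbon': ''}  # one dict object shared by the whole class

    def __init__(self, extra_info):
        # Buggy: `merged = self.default_extra_info` resolves to the class dict,
        # so update() would leak this instance's values into all other instances.
        merged = copy.copy(self.default_extra_info)  # fixed: work on a per-instance copy
        merged.update(extra_info)
        self.extra_info = merged


a = Shared({'ribbon': 'red'})
b = Shared({})
assert b.extra_info['ribbon'] == ''  # would be 'red' with the buggy version
```
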
<code>
[start of elections/models.py]
1 # coding=utf-8
2 from django.db import models
3 from autoslug import AutoSlugField
4 from taggit.managers import TaggableManager
5 from django.core.urlresolvers import reverse
6 from popolo.models import Person, Area
7 from django.utils.translation import ugettext as _
8 from markdown_deux.templatetags.markdown_deux_tags import markdown_allowed
9 from candidator.models import Category, Topic as CanTopic
10 from picklefield.fields import PickledObjectField
11 from django.conf import settings
12
13
14 class ExtraInfoMixin(models.Model):
15 extra_info = PickledObjectField(default={})
16
17 class Meta:
18 abstract = True
19
20 def __init__(self, *args, **kwargs):
21 super(ExtraInfoMixin, self).__init__(*args, **kwargs)
22 default_extra_info = self.default_extra_info
23 default_extra_info.update(self.extra_info)
24 self.extra_info = default_extra_info
25
26
27 class Candidate(Person, ExtraInfoMixin):
28 election = models.ForeignKey('Election', related_name='candidates', null=True)
29
30 default_extra_info = settings.DEFAULT_CANDIDATE_EXTRA_INFO
31
32 @property
33 def twitter(self):
34 links = self.contact_details.filter(contact_type="TWITTER")
35 if links:
36 return links.first()
37
38
39 class PersonalData(models.Model):
40 candidate = models.ForeignKey('Candidate', related_name="personal_datas")
41 label = models.CharField(max_length=512)
42 value = models.CharField(max_length=1024)
43
44
45 class Topic(CanTopic):
46 class Meta:
47 proxy = True
48
49 @property
50 def election(self):
51 category = QuestionCategory.objects.get(category_ptr=self.category)
52 return category.election
53
54
55 class QuestionCategory(Category):
56 election = models.ForeignKey('Election', related_name='categories', null=True)
57
58
59 class Election(ExtraInfoMixin, models.Model):
60 name = models.CharField(max_length=255)
61 slug = AutoSlugField(populate_from='name', unique=True)
62 description = models.TextField(blank=True)
63 tags = TaggableManager(blank=True)
64 searchable = models.BooleanField(default=True)
65 highlighted = models.BooleanField(default=False)
66 extra_info_title = models.CharField(max_length=50, blank=True, null=True)
67 extra_info_content = models.TextField(max_length=3000, blank=True, null=True, help_text=_("Puedes usar Markdown. <br/> ")
68 + markdown_allowed())
69 uses_preguntales = models.BooleanField(default=True, help_text=_(u"Esta elección debe usar preguntales?"))
70 uses_ranking = models.BooleanField(default=True, help_text=_(u"Esta elección debe usar ranking"))
71 uses_face_to_face = models.BooleanField(default=True, help_text=_(u"Esta elección debe usar frente a frente"))
72 uses_soul_mate = models.BooleanField(default=True, help_text=_(u"Esta elección debe usar 1/2 naranja"))
73 uses_questionary = models.BooleanField(default=True, help_text=_(u"Esta elección debe usar cuestionario"))
74
75 default_extra_info = settings.DEFAULT_ELECTION_EXTRA_INFO
76 area = models.ForeignKey(Area, null=True, related_name="elections")
77
78 def __unicode__(self):
79 return self.name
80
81 def get_absolute_url(self):
82 return reverse('election_view', kwargs={'slug': self.slug})
83
84 def get_extra_info_url(self):
85 return reverse('election_extra_info', kwargs={'slug': self.slug})
86
87 class Meta:
88 verbose_name = _(u'Mi Elección')
89 verbose_name_plural = _(u'Mis Elecciones')
90
[end of elections/models.py]
[start of elections/admin.py]
1
2 from django.contrib import admin
3 from elections.models import Election, Candidate, PersonalData, QuestionCategory
4 from popolo.models import Organization, Membership, ContactDetail, OtherName, Post, Area
5 from django.contrib.contenttypes.admin import GenericTabularInline
6 from django import forms
7 from django.conf import settings
8 from candidator.models import Position, TakenPosition
9 from elections.models import Topic
10
11
12 class TakenPositionModelForm(forms.ModelForm):
13
14 def __init__(self, *args, **kwargs):
15 super(TakenPositionModelForm, self).__init__(*args, **kwargs)
16 if self.instance.id:
17 positions = self.instance.topic.positions.all()
18 self.fields['position'].queryset = positions
19
20 class Meta:
21 model = TakenPosition
22 fields = ('topic', 'position', 'person')
23
24
25 class TakenPositionInlineModelForm(forms.ModelForm):
26 def __init__(self, *args, **kwargs):
27 super(TakenPositionInlineModelForm, self).__init__(*args, **kwargs)
28 if 'instance' in kwargs:
29 positions_qs = kwargs['instance'].topic.positions.all()
30 self.fields['position'].queryset = positions_qs
31
32 class Meta:
33 model = TakenPosition
34 fields = ('position', 'description')
35
36 def save(self, force_insert=False, force_update=False, commit=True):
37 m = super(TakenPositionInlineModelForm, self).save(commit=False)
38 if m.position is not None:
39 m.topic = m.position.topic
40 m.save()
41 return m
42
43
44 class TakenPositionCandidateInline(admin.TabularInline):
45 model = TakenPosition
46 form = TakenPositionInlineModelForm
47 extra = 0
48
49 def formfield_for_foreignkey(self, db_field, request, **kwargs):
50 if db_field.name == 'position':
51 pass
52 return super(TakenPositionCandidateInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
53
54
55 class TakenPositionAdmin(admin.ModelAdmin):
56 form = TakenPositionModelForm
57 admin.site.register(TakenPosition, TakenPositionAdmin)
58
59
60 class TakenPositionInline(admin.TabularInline):
61 model = TakenPosition
62
63
64 class PositionAdmin(admin.ModelAdmin):
65 inlines = [TakenPositionInline, ]
66
67 admin.site.register(Position, PositionAdmin)
68
69
70 class PositionInline(admin.TabularInline):
71 model = Position
72
73
74 class TopicAdmin(admin.ModelAdmin):
75 inlines = [PositionInline, ]
76 list_display = ('__str__', 'election')
77
78 def save_model(self, request, obj, form, change):
79 creating = not change
80 obj.save()
81 if creating:
82 for candidate in obj.election.candidates.all():
83 TakenPosition.objects.get_or_create(topic=obj, person=candidate)
84
85 admin.site.register(Topic, TopicAdmin)
86
87
88 class TopicInline(admin.TabularInline):
89 model = Topic
90
91
92 class QuestionCategoryAdmin(admin.ModelAdmin):
93 inlines = [TopicInline, ]
94 list_display = ('__str__', 'election')
95 admin.site.register(QuestionCategory, QuestionCategoryAdmin)
96
97
98 class QuestionCategoryInline(admin.TabularInline):
99 model = QuestionCategory
100
101 list_display = ('__str__', 'election')
102
103
104 class CandidateModelForm(forms.ModelForm):
105 model = Candidate
106
107 def __init__(self, *args, **kwargs):
108 super(CandidateModelForm, self).__init__(*args, **kwargs)
109 self.extra_info_fields = settings.DEFAULT_CANDIDATE_EXTRA_INFO.keys()
110 for key in self.extra_info_fields:
111 self.fields.update({key: forms.CharField(max_length=512,
112 label=key,
113 required=False,
114 widget=forms.TextInput(),
115 initial=self.instance.extra_info[key]
116 )})
117
118 def save(self, commit=True, *args, **kwargs):
119 instance = super(CandidateModelForm, self).save(commit, *args, **kwargs)
120 for key in self.extra_info_fields:
121 instance.extra_info[key] = self.cleaned_data.get(key, None)
122 if commit:
123 instance.save()
124
125 return instance
126
127
128 class ElectionModelForm(forms.ModelForm):
129 model = Election
130
131 def __init__(self, *args, **kwargs):
132 super(ElectionModelForm, self).__init__(*args, **kwargs)
133 self.extra_info_fields = settings.DEFAULT_ELECTION_EXTRA_INFO.keys()
134 for key in self.extra_info_fields:
135 self.fields.update({key: forms.CharField(max_length=512,
136 label=key,
137 required=False,
138 widget=forms.TextInput(),
139 initial=self.instance.extra_info[key]
140 )})
141
142 def save(self, commit=True, *args, **kwargs):
143 instance = super(ElectionModelForm, self).save(commit, *args, **kwargs)
144 for key in self.extra_info_fields:
145 instance.extra_info[key] = self.cleaned_data.get(key, None)
146 if commit:
147 instance.save()
148 return instance
149
150
151 class ElectionAdmin(admin.ModelAdmin):
152 form = ElectionModelForm
153 search_fields = ['name', 'tags']
154 inlines = [QuestionCategoryInline, ]
155
156 def get_fieldsets(self, request, obj=None):
157 fieldsets = super(ElectionAdmin, self).get_fieldsets(request, obj)
158 if hasattr(request, "_gfs_marker"):
159 for key in settings.DEFAULT_ELECTION_EXTRA_INFO.keys():
160 fieldsets[0][1]['fields'] += (key,)
161 setattr(request, "_gfs_marker", 1)
162 return fieldsets
163
164 admin.site.register(Election, ElectionAdmin)
165
166
167 class OrgnizationAdmin(admin.ModelAdmin):
168 pass
169 admin.site.register(Organization, OrgnizationAdmin)
170
171
172 class ContactDetailInline(GenericTabularInline):
173 model = ContactDetail
174 fields = ('label', 'contact_type', 'value')
175
176
177 class MembershipInline(admin.TabularInline):
178 model = Membership
179 fields = ('label', 'role', 'organization', 'on_behalf_of', 'post', 'start_date', 'end_date', 'area')
180
181
182 class OtherNameInline(GenericTabularInline):
183 model = OtherName
184
185
186 class PersonalDataInline(admin.TabularInline):
187 model = PersonalData
188
189
190 class CandidateAdmin(admin.ModelAdmin):
191 form = CandidateModelForm
192 inlines = [
193 ContactDetailInline,
194 MembershipInline,
195 OtherNameInline,
196 PersonalDataInline,
197 TakenPositionCandidateInline,
198 ]
199 search_fields = ['name', 'election__name']
200 ordering = ['name']
201
202 def get_fieldsets(self, request, obj=None):
203 fieldsets = super(CandidateAdmin, self).get_fieldsets(request, obj)
204 if hasattr(request, "_gfs_marker"):
205 for key in settings.DEFAULT_CANDIDATE_EXTRA_INFO.keys():
206 fieldsets[0][1]['fields'] += (key,)
207 setattr(request, "_gfs_marker", 1)
208 return fieldsets
209
210 def save_model(self, request, obj, form, change):
211 creating = not change
212 obj.save()
213 if creating:
214 for cat in obj.election.categories.all():
215 for topic in cat.topics.all():
216 TakenPosition.objects.get_or_create(topic=topic, person=obj)
217
218 admin.site.register(Candidate, CandidateAdmin)
219
220
221 class PostAdmin(admin.ModelAdmin):
222 pass
223 admin.site.register(Post, PostAdmin)
224
225
226 class AreaAdmin(admin.ModelAdmin):
227 pass
228 admin.site.register(Area, AreaAdmin)
229
[end of elections/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/elections/admin.py b/elections/admin.py
--- a/elections/admin.py
+++ b/elections/admin.py
@@ -45,6 +45,7 @@
model = TakenPosition
form = TakenPositionInlineModelForm
extra = 0
+ can_delete = False
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == 'position':
diff --git a/elections/models.py b/elections/models.py
--- a/elections/models.py
+++ b/elections/models.py
@@ -9,6 +9,8 @@
from candidator.models import Category, Topic as CanTopic
from picklefield.fields import PickledObjectField
from django.conf import settings
+from django.utils.encoding import python_2_unicode_compatible
+import copy
class ExtraInfoMixin(models.Model):
@@ -19,7 +21,7 @@
def __init__(self, *args, **kwargs):
super(ExtraInfoMixin, self).__init__(*args, **kwargs)
- default_extra_info = self.default_extra_info
+ default_extra_info = copy.copy(self.default_extra_info)
default_extra_info.update(self.extra_info)
self.extra_info = default_extra_info
@@ -52,9 +54,13 @@
return category.election
+@python_2_unicode_compatible
class QuestionCategory(Category):
election = models.ForeignKey('Election', related_name='categories', null=True)
+ def __str__(self):
+ return u'<%s> in <%s>' % (self.name, self.election.name)
+
class Election(ExtraInfoMixin, models.Model):
name = models.CharField(max_length=255)
|
{"golden_diff": "diff --git a/elections/admin.py b/elections/admin.py\n--- a/elections/admin.py\n+++ b/elections/admin.py\n@@ -45,6 +45,7 @@\n model = TakenPosition\n form = TakenPositionInlineModelForm\n extra = 0\n+ can_delete = False\n \n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == 'position':\ndiff --git a/elections/models.py b/elections/models.py\n--- a/elections/models.py\n+++ b/elections/models.py\n@@ -9,6 +9,8 @@\n from candidator.models import Category, Topic as CanTopic\n from picklefield.fields import PickledObjectField\n from django.conf import settings\n+from django.utils.encoding import python_2_unicode_compatible\n+import copy\n \n \n class ExtraInfoMixin(models.Model):\n@@ -19,7 +21,7 @@\n \n def __init__(self, *args, **kwargs):\n super(ExtraInfoMixin, self).__init__(*args, **kwargs)\n- default_extra_info = self.default_extra_info\n+ default_extra_info = copy.copy(self.default_extra_info)\n default_extra_info.update(self.extra_info)\n self.extra_info = default_extra_info\n \n@@ -52,9 +54,13 @@\n return category.election\n \n \n+@python_2_unicode_compatible\n class QuestionCategory(Category):\n election = models.ForeignKey('Election', related_name='categories', null=True)\n \n+ def __str__(self):\n+ return u'<%s> in <%s>' % (self.name, self.election.name)\n+\n \n class Election(ExtraInfoMixin, models.Model):\n name = models.CharField(max_length=255)\n", "issue": "Extra info is the same for all candidates\nWhen using extra_info such as ribbon or colors the same values are returned for all candidates in cases such as election page or soul-mate JSON.\nThe correct value is returned in cases where information for only one candidate is requested, such as the candidate detail page.\n\n", "before_files": [{"content": "# coding=utf-8\nfrom django.db import models\nfrom autoslug import AutoSlugField\nfrom taggit.managers import TaggableManager\nfrom django.core.urlresolvers import reverse\nfrom popolo.models import Person, Area\nfrom django.utils.translation import ugettext as _\nfrom markdown_deux.templatetags.markdown_deux_tags import markdown_allowed\nfrom candidator.models import Category, Topic as CanTopic\nfrom picklefield.fields import PickledObjectField\nfrom django.conf import settings\n\n\nclass ExtraInfoMixin(models.Model):\n extra_info = PickledObjectField(default={})\n\n class Meta:\n abstract = True\n\n def __init__(self, *args, **kwargs):\n super(ExtraInfoMixin, self).__init__(*args, **kwargs)\n default_extra_info = self.default_extra_info\n default_extra_info.update(self.extra_info)\n self.extra_info = default_extra_info\n\n\nclass Candidate(Person, ExtraInfoMixin):\n election = models.ForeignKey('Election', related_name='candidates', null=True)\n\n default_extra_info = settings.DEFAULT_CANDIDATE_EXTRA_INFO\n\n @property\n def twitter(self):\n links = self.contact_details.filter(contact_type=\"TWITTER\")\n if links:\n return links.first()\n\n\nclass PersonalData(models.Model):\n candidate = models.ForeignKey('Candidate', related_name=\"personal_datas\")\n label = models.CharField(max_length=512)\n value = models.CharField(max_length=1024)\n\n\nclass Topic(CanTopic):\n class Meta:\n proxy = True\n\n @property\n def election(self):\n category = QuestionCategory.objects.get(category_ptr=self.category)\n return category.election\n\n\nclass QuestionCategory(Category):\n election = models.ForeignKey('Election', related_name='categories', null=True)\n\n\nclass Election(ExtraInfoMixin, models.Model):\n name = 
models.CharField(max_length=255)\n slug = AutoSlugField(populate_from='name', unique=True)\n description = models.TextField(blank=True)\n tags = TaggableManager(blank=True)\n searchable = models.BooleanField(default=True)\n highlighted = models.BooleanField(default=False)\n extra_info_title = models.CharField(max_length=50, blank=True, null=True)\n extra_info_content = models.TextField(max_length=3000, blank=True, null=True, help_text=_(\"Puedes usar Markdown. <br/> \")\n + markdown_allowed())\n uses_preguntales = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar preguntales?\"))\n uses_ranking = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar ranking\"))\n uses_face_to_face = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar frente a frente\"))\n uses_soul_mate = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar 1/2 naranja\"))\n uses_questionary = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar cuestionario\"))\n\n default_extra_info = settings.DEFAULT_ELECTION_EXTRA_INFO\n area = models.ForeignKey(Area, null=True, related_name=\"elections\")\n\n def __unicode__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse('election_view', kwargs={'slug': self.slug})\n\n def get_extra_info_url(self):\n return reverse('election_extra_info', kwargs={'slug': self.slug})\n\n class Meta:\n verbose_name = _(u'Mi Elecci\u00f3n')\n verbose_name_plural = _(u'Mis Elecciones')\n", "path": "elections/models.py"}, {"content": "\nfrom django.contrib import admin\nfrom elections.models import Election, Candidate, PersonalData, QuestionCategory\nfrom popolo.models import Organization, Membership, ContactDetail, OtherName, Post, Area\nfrom django.contrib.contenttypes.admin import GenericTabularInline\nfrom django import forms\nfrom django.conf import settings\nfrom candidator.models import Position, TakenPosition\nfrom elections.models import Topic\n\n\nclass TakenPositionModelForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(TakenPositionModelForm, self).__init__(*args, **kwargs)\n if self.instance.id:\n positions = self.instance.topic.positions.all()\n self.fields['position'].queryset = positions\n\n class Meta:\n model = TakenPosition\n fields = ('topic', 'position', 'person')\n\n\nclass TakenPositionInlineModelForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(TakenPositionInlineModelForm, self).__init__(*args, **kwargs)\n if 'instance' in kwargs:\n positions_qs = kwargs['instance'].topic.positions.all()\n self.fields['position'].queryset = positions_qs\n\n class Meta:\n model = TakenPosition\n fields = ('position', 'description')\n\n def save(self, force_insert=False, force_update=False, commit=True):\n m = super(TakenPositionInlineModelForm, self).save(commit=False)\n if m.position is not None:\n m.topic = m.position.topic\n m.save()\n return m\n\n\nclass TakenPositionCandidateInline(admin.TabularInline):\n model = TakenPosition\n form = TakenPositionInlineModelForm\n extra = 0\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == 'position':\n pass\n return super(TakenPositionCandidateInline, self).formfield_for_foreignkey(db_field, request, **kwargs)\n\n\nclass TakenPositionAdmin(admin.ModelAdmin):\n form = TakenPositionModelForm\nadmin.site.register(TakenPosition, TakenPositionAdmin)\n\n\nclass TakenPositionInline(admin.TabularInline):\n model = 
TakenPosition\n\n\nclass PositionAdmin(admin.ModelAdmin):\n inlines = [TakenPositionInline, ]\n\nadmin.site.register(Position, PositionAdmin)\n\n\nclass PositionInline(admin.TabularInline):\n model = Position\n\n\nclass TopicAdmin(admin.ModelAdmin):\n inlines = [PositionInline, ]\n list_display = ('__str__', 'election')\n\n def save_model(self, request, obj, form, change):\n creating = not change\n obj.save()\n if creating:\n for candidate in obj.election.candidates.all():\n TakenPosition.objects.get_or_create(topic=obj, person=candidate)\n\nadmin.site.register(Topic, TopicAdmin)\n\n\nclass TopicInline(admin.TabularInline):\n model = Topic\n\n\nclass QuestionCategoryAdmin(admin.ModelAdmin):\n inlines = [TopicInline, ]\n list_display = ('__str__', 'election')\nadmin.site.register(QuestionCategory, QuestionCategoryAdmin)\n\n\nclass QuestionCategoryInline(admin.TabularInline):\n model = QuestionCategory\n\n list_display = ('__str__', 'election')\n\n\nclass CandidateModelForm(forms.ModelForm):\n model = Candidate\n\n def __init__(self, *args, **kwargs):\n super(CandidateModelForm, self).__init__(*args, **kwargs)\n self.extra_info_fields = settings.DEFAULT_CANDIDATE_EXTRA_INFO.keys()\n for key in self.extra_info_fields:\n self.fields.update({key: forms.CharField(max_length=512,\n label=key,\n required=False,\n widget=forms.TextInput(),\n initial=self.instance.extra_info[key]\n )})\n\n def save(self, commit=True, *args, **kwargs):\n instance = super(CandidateModelForm, self).save(commit, *args, **kwargs)\n for key in self.extra_info_fields:\n instance.extra_info[key] = self.cleaned_data.get(key, None)\n if commit:\n instance.save()\n\n return instance\n\n\nclass ElectionModelForm(forms.ModelForm):\n model = Election\n\n def __init__(self, *args, **kwargs):\n super(ElectionModelForm, self).__init__(*args, **kwargs)\n self.extra_info_fields = settings.DEFAULT_ELECTION_EXTRA_INFO.keys()\n for key in self.extra_info_fields:\n self.fields.update({key: forms.CharField(max_length=512,\n label=key,\n required=False,\n widget=forms.TextInput(),\n initial=self.instance.extra_info[key]\n )})\n\n def save(self, commit=True, *args, **kwargs):\n instance = super(ElectionModelForm, self).save(commit, *args, **kwargs)\n for key in self.extra_info_fields:\n instance.extra_info[key] = self.cleaned_data.get(key, None)\n if commit:\n instance.save()\n return instance\n\n\nclass ElectionAdmin(admin.ModelAdmin):\n form = ElectionModelForm\n search_fields = ['name', 'tags']\n inlines = [QuestionCategoryInline, ]\n\n def get_fieldsets(self, request, obj=None):\n fieldsets = super(ElectionAdmin, self).get_fieldsets(request, obj)\n if hasattr(request, \"_gfs_marker\"):\n for key in settings.DEFAULT_ELECTION_EXTRA_INFO.keys():\n fieldsets[0][1]['fields'] += (key,)\n setattr(request, \"_gfs_marker\", 1)\n return fieldsets\n\nadmin.site.register(Election, ElectionAdmin)\n\n\nclass OrgnizationAdmin(admin.ModelAdmin):\n pass\nadmin.site.register(Organization, OrgnizationAdmin)\n\n\nclass ContactDetailInline(GenericTabularInline):\n model = ContactDetail\n fields = ('label', 'contact_type', 'value')\n\n\nclass MembershipInline(admin.TabularInline):\n model = Membership\n fields = ('label', 'role', 'organization', 'on_behalf_of', 'post', 'start_date', 'end_date', 'area')\n\n\nclass OtherNameInline(GenericTabularInline):\n model = OtherName\n\n\nclass PersonalDataInline(admin.TabularInline):\n model = PersonalData\n\n\nclass CandidateAdmin(admin.ModelAdmin):\n form = CandidateModelForm\n inlines = [\n ContactDetailInline,\n 
MembershipInline,\n OtherNameInline,\n PersonalDataInline,\n TakenPositionCandidateInline,\n ]\n search_fields = ['name', 'election__name']\n ordering = ['name']\n\n def get_fieldsets(self, request, obj=None):\n fieldsets = super(CandidateAdmin, self).get_fieldsets(request, obj)\n if hasattr(request, \"_gfs_marker\"):\n for key in settings.DEFAULT_CANDIDATE_EXTRA_INFO.keys():\n fieldsets[0][1]['fields'] += (key,)\n setattr(request, \"_gfs_marker\", 1)\n return fieldsets\n\n def save_model(self, request, obj, form, change):\n creating = not change\n obj.save()\n if creating:\n for cat in obj.election.categories.all():\n for topic in cat.topics.all():\n TakenPosition.objects.get_or_create(topic=topic, person=obj)\n\nadmin.site.register(Candidate, CandidateAdmin)\n\n\nclass PostAdmin(admin.ModelAdmin):\n pass\nadmin.site.register(Post, PostAdmin)\n\n\nclass AreaAdmin(admin.ModelAdmin):\n pass\nadmin.site.register(Area, AreaAdmin)\n", "path": "elections/admin.py"}]}
| 3,672 | 375 |
gh_patches_debug_40194
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-python-2033
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Do not add redis SET data in span description when `send_default_pii=False`
### Problem Statement
Currently the redis integration records a span for all redis commands. This is good.
But when `send_default_pii=False`, the value of the redis SET command (https://redis.io/commands/set/) should be redacted.
### Solution Brainstorm
do it
</issue>
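
One way to satisfy this — sketched below purely as an illustration, not the actual sentry-sdk implementation — is to mask the value arguments of SET-like commands while building the span description whenever PII is not allowed. The command list and function name here are assumptions.

```python
SENSITIVE_COMMANDS = frozenset(["set", "setex", "setnx", "getset"])  # assumed list


def describe_command(name, args, send_default_pii):
    """Build a span description, masking values of SET-like commands."""
    parts = [name]
    for i, arg in enumerate(args):
        if not send_default_pii and name.lower() in SENSITIVE_COMMANDS and i > 0:
            parts.append("[Filtered]")  # keep the key (i == 0), hide the value(s)
        else:
            parts.append(repr(arg))
    return " ".join(parts)


print(describe_command("SET", ("session:42", "secret-token"), send_default_pii=False))
# -> SET 'session:42' [Filtered]
```
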
<code>
[start of sentry_sdk/integrations/redis.py]
1 from __future__ import absolute_import
2
3 from sentry_sdk import Hub
4 from sentry_sdk.consts import OP
5 from sentry_sdk.utils import capture_internal_exceptions, logger
6 from sentry_sdk.integrations import Integration, DidNotEnable
7
8 from sentry_sdk._types import TYPE_CHECKING
9
10 if TYPE_CHECKING:
11 from typing import Any, Sequence
12
13 _SINGLE_KEY_COMMANDS = frozenset(
14 ["decr", "decrby", "get", "incr", "incrby", "pttl", "set", "setex", "setnx", "ttl"]
15 )
16 _MULTI_KEY_COMMANDS = frozenset(["del", "touch", "unlink"])
17
18 #: Trim argument lists to this many values
19 _MAX_NUM_ARGS = 10
20
21
22 def patch_redis_pipeline(pipeline_cls, is_cluster, get_command_args_fn):
23 # type: (Any, bool, Any) -> None
24 old_execute = pipeline_cls.execute
25
26 def sentry_patched_execute(self, *args, **kwargs):
27 # type: (Any, *Any, **Any) -> Any
28 hub = Hub.current
29
30 if hub.get_integration(RedisIntegration) is None:
31 return old_execute(self, *args, **kwargs)
32
33 with hub.start_span(
34 op=OP.DB_REDIS, description="redis.pipeline.execute"
35 ) as span:
36 with capture_internal_exceptions():
37 span.set_tag("redis.is_cluster", is_cluster)
38 transaction = self.transaction if not is_cluster else False
39 span.set_tag("redis.transaction", transaction)
40
41 commands = []
42 for i, arg in enumerate(self.command_stack):
43 if i > _MAX_NUM_ARGS:
44 break
45 command_args = []
46 for j, command_arg in enumerate(get_command_args_fn(arg)):
47 if j > 0:
48 command_arg = repr(command_arg)
49 command_args.append(command_arg)
50 commands.append(" ".join(command_args))
51
52 span.set_data(
53 "redis.commands",
54 {"count": len(self.command_stack), "first_ten": commands},
55 )
56
57 return old_execute(self, *args, **kwargs)
58
59 pipeline_cls.execute = sentry_patched_execute
60
61
62 def _get_redis_command_args(command):
63 # type: (Any) -> Sequence[Any]
64 return command[0]
65
66
67 def _parse_rediscluster_command(command):
68 # type: (Any) -> Sequence[Any]
69 return command.args
70
71
72 def _patch_rediscluster():
73 # type: () -> None
74 try:
75 import rediscluster # type: ignore
76 except ImportError:
77 return
78
79 patch_redis_client(rediscluster.RedisCluster, is_cluster=True)
80
81 # up to v1.3.6, __version__ attribute is a tuple
82 # from v2.0.0, __version__ is a string and VERSION a tuple
83 version = getattr(rediscluster, "VERSION", rediscluster.__version__)
84
85 # StrictRedisCluster was introduced in v0.2.0 and removed in v2.0.0
86 # https://github.com/Grokzen/redis-py-cluster/blob/master/docs/release-notes.rst
87 if (0, 2, 0) < version < (2, 0, 0):
88 pipeline_cls = rediscluster.pipeline.StrictClusterPipeline
89 patch_redis_client(rediscluster.StrictRedisCluster, is_cluster=True)
90 else:
91 pipeline_cls = rediscluster.pipeline.ClusterPipeline
92
93 patch_redis_pipeline(pipeline_cls, True, _parse_rediscluster_command)
94
95
96 class RedisIntegration(Integration):
97 identifier = "redis"
98
99 @staticmethod
100 def setup_once():
101 # type: () -> None
102 try:
103 import redis
104 except ImportError:
105 raise DidNotEnable("Redis client not installed")
106
107 patch_redis_client(redis.StrictRedis, is_cluster=False)
108 patch_redis_pipeline(redis.client.Pipeline, False, _get_redis_command_args)
109 try:
110 strict_pipeline = redis.client.StrictPipeline # type: ignore
111 except AttributeError:
112 pass
113 else:
114 patch_redis_pipeline(strict_pipeline, False, _get_redis_command_args)
115
116 try:
117 import rb.clients # type: ignore
118 except ImportError:
119 pass
120 else:
121 patch_redis_client(rb.clients.FanoutClient, is_cluster=False)
122 patch_redis_client(rb.clients.MappingClient, is_cluster=False)
123 patch_redis_client(rb.clients.RoutingClient, is_cluster=False)
124
125 try:
126 _patch_rediscluster()
127 except Exception:
128 logger.exception("Error occurred while patching `rediscluster` library")
129
130
131 def patch_redis_client(cls, is_cluster):
132 # type: (Any, bool) -> None
133 """
134 This function can be used to instrument custom redis client classes or
135 subclasses.
136 """
137 old_execute_command = cls.execute_command
138
139 def sentry_patched_execute_command(self, name, *args, **kwargs):
140 # type: (Any, str, *Any, **Any) -> Any
141 hub = Hub.current
142
143 if hub.get_integration(RedisIntegration) is None:
144 return old_execute_command(self, name, *args, **kwargs)
145
146 description = name
147
148 with capture_internal_exceptions():
149 description_parts = [name]
150 for i, arg in enumerate(args):
151 if i > _MAX_NUM_ARGS:
152 break
153
154 description_parts.append(repr(arg))
155
156 description = " ".join(description_parts)
157
158 with hub.start_span(op=OP.DB_REDIS, description=description) as span:
159 span.set_tag("redis.is_cluster", is_cluster)
160 if name:
161 span.set_tag("redis.command", name)
162
163 if name and args:
164 name_low = name.lower()
165 if (name_low in _SINGLE_KEY_COMMANDS) or (
166 name_low in _MULTI_KEY_COMMANDS and len(args) == 1
167 ):
168 span.set_tag("redis.key", args[0])
169
170 return old_execute_command(self, name, *args, **kwargs)
171
172 cls.execute_command = sentry_patched_execute_command
173
[end of sentry_sdk/integrations/redis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sentry_sdk/integrations/redis.py b/sentry_sdk/integrations/redis.py
--- a/sentry_sdk/integrations/redis.py
+++ b/sentry_sdk/integrations/redis.py
@@ -2,7 +2,12 @@
from sentry_sdk import Hub
from sentry_sdk.consts import OP
-from sentry_sdk.utils import capture_internal_exceptions, logger
+from sentry_sdk.hub import _should_send_default_pii
+from sentry_sdk.utils import (
+ SENSITIVE_DATA_SUBSTITUTE,
+ capture_internal_exceptions,
+ logger,
+)
from sentry_sdk.integrations import Integration, DidNotEnable
from sentry_sdk._types import TYPE_CHECKING
@@ -15,8 +20,13 @@
)
_MULTI_KEY_COMMANDS = frozenset(["del", "touch", "unlink"])
-#: Trim argument lists to this many values
-_MAX_NUM_ARGS = 10
+_COMMANDS_INCLUDING_SENSITIVE_DATA = [
+ "auth",
+]
+
+_MAX_NUM_ARGS = 10 # Trim argument lists to this many values
+
+_DEFAULT_MAX_DATA_SIZE = 1024
def patch_redis_pipeline(pipeline_cls, is_cluster, get_command_args_fn):
@@ -96,6 +106,10 @@
class RedisIntegration(Integration):
identifier = "redis"
+ def __init__(self, max_data_size=_DEFAULT_MAX_DATA_SIZE):
+ # type: (int) -> None
+ self.max_data_size = max_data_size
+
@staticmethod
def setup_once():
# type: () -> None
@@ -139,8 +153,9 @@
def sentry_patched_execute_command(self, name, *args, **kwargs):
# type: (Any, str, *Any, **Any) -> Any
hub = Hub.current
+ integration = hub.get_integration(RedisIntegration)
- if hub.get_integration(RedisIntegration) is None:
+ if integration is None:
return old_execute_command(self, name, *args, **kwargs)
description = name
@@ -151,12 +166,33 @@
if i > _MAX_NUM_ARGS:
break
- description_parts.append(repr(arg))
+ name_low = name.lower()
+
+ if name_low in _COMMANDS_INCLUDING_SENSITIVE_DATA:
+ description_parts.append(SENSITIVE_DATA_SUBSTITUTE)
+ continue
+
+ arg_is_the_key = i == 0
+ if arg_is_the_key:
+ description_parts.append(repr(arg))
+
+ else:
+ if _should_send_default_pii():
+ description_parts.append(repr(arg))
+ else:
+ description_parts.append(SENSITIVE_DATA_SUBSTITUTE)
description = " ".join(description_parts)
+ data_should_be_truncated = (
+ integration.max_data_size and len(description) > integration.max_data_size
+ )
+ if data_should_be_truncated:
+ description = description[: integration.max_data_size - len("...")] + "..."
+
with hub.start_span(op=OP.DB_REDIS, description=description) as span:
span.set_tag("redis.is_cluster", is_cluster)
+
if name:
span.set_tag("redis.command", name)
|
{"golden_diff": "diff --git a/sentry_sdk/integrations/redis.py b/sentry_sdk/integrations/redis.py\n--- a/sentry_sdk/integrations/redis.py\n+++ b/sentry_sdk/integrations/redis.py\n@@ -2,7 +2,12 @@\n \n from sentry_sdk import Hub\n from sentry_sdk.consts import OP\n-from sentry_sdk.utils import capture_internal_exceptions, logger\n+from sentry_sdk.hub import _should_send_default_pii\n+from sentry_sdk.utils import (\n+ SENSITIVE_DATA_SUBSTITUTE,\n+ capture_internal_exceptions,\n+ logger,\n+)\n from sentry_sdk.integrations import Integration, DidNotEnable\n \n from sentry_sdk._types import TYPE_CHECKING\n@@ -15,8 +20,13 @@\n )\n _MULTI_KEY_COMMANDS = frozenset([\"del\", \"touch\", \"unlink\"])\n \n-#: Trim argument lists to this many values\n-_MAX_NUM_ARGS = 10\n+_COMMANDS_INCLUDING_SENSITIVE_DATA = [\n+ \"auth\",\n+]\n+\n+_MAX_NUM_ARGS = 10 # Trim argument lists to this many values\n+\n+_DEFAULT_MAX_DATA_SIZE = 1024\n \n \n def patch_redis_pipeline(pipeline_cls, is_cluster, get_command_args_fn):\n@@ -96,6 +106,10 @@\n class RedisIntegration(Integration):\n identifier = \"redis\"\n \n+ def __init__(self, max_data_size=_DEFAULT_MAX_DATA_SIZE):\n+ # type: (int) -> None\n+ self.max_data_size = max_data_size\n+\n @staticmethod\n def setup_once():\n # type: () -> None\n@@ -139,8 +153,9 @@\n def sentry_patched_execute_command(self, name, *args, **kwargs):\n # type: (Any, str, *Any, **Any) -> Any\n hub = Hub.current\n+ integration = hub.get_integration(RedisIntegration)\n \n- if hub.get_integration(RedisIntegration) is None:\n+ if integration is None:\n return old_execute_command(self, name, *args, **kwargs)\n \n description = name\n@@ -151,12 +166,33 @@\n if i > _MAX_NUM_ARGS:\n break\n \n- description_parts.append(repr(arg))\n+ name_low = name.lower()\n+\n+ if name_low in _COMMANDS_INCLUDING_SENSITIVE_DATA:\n+ description_parts.append(SENSITIVE_DATA_SUBSTITUTE)\n+ continue\n+\n+ arg_is_the_key = i == 0\n+ if arg_is_the_key:\n+ description_parts.append(repr(arg))\n+\n+ else:\n+ if _should_send_default_pii():\n+ description_parts.append(repr(arg))\n+ else:\n+ description_parts.append(SENSITIVE_DATA_SUBSTITUTE)\n \n description = \" \".join(description_parts)\n \n+ data_should_be_truncated = (\n+ integration.max_data_size and len(description) > integration.max_data_size\n+ )\n+ if data_should_be_truncated:\n+ description = description[: integration.max_data_size - len(\"...\")] + \"...\"\n+\n with hub.start_span(op=OP.DB_REDIS, description=description) as span:\n span.set_tag(\"redis.is_cluster\", is_cluster)\n+\n if name:\n span.set_tag(\"redis.command\", name)\n", "issue": "Do not add redis SET data in span description when `set_default_pii=False`\n### Problem Statement\n\nCurrently the redis integration records a span for all redis commands. This is good. 
\r\nBut when `send_default_pii=False` the value of the redis SET command (https://redis.io/commands/set/) should be redacted.\n\n### Solution Brainstorm\n\ndo it\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom sentry_sdk import Hub\nfrom sentry_sdk.consts import OP\nfrom sentry_sdk.utils import capture_internal_exceptions, logger\nfrom sentry_sdk.integrations import Integration, DidNotEnable\n\nfrom sentry_sdk._types import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from typing import Any, Sequence\n\n_SINGLE_KEY_COMMANDS = frozenset(\n [\"decr\", \"decrby\", \"get\", \"incr\", \"incrby\", \"pttl\", \"set\", \"setex\", \"setnx\", \"ttl\"]\n)\n_MULTI_KEY_COMMANDS = frozenset([\"del\", \"touch\", \"unlink\"])\n\n#: Trim argument lists to this many values\n_MAX_NUM_ARGS = 10\n\n\ndef patch_redis_pipeline(pipeline_cls, is_cluster, get_command_args_fn):\n # type: (Any, bool, Any) -> None\n old_execute = pipeline_cls.execute\n\n def sentry_patched_execute(self, *args, **kwargs):\n # type: (Any, *Any, **Any) -> Any\n hub = Hub.current\n\n if hub.get_integration(RedisIntegration) is None:\n return old_execute(self, *args, **kwargs)\n\n with hub.start_span(\n op=OP.DB_REDIS, description=\"redis.pipeline.execute\"\n ) as span:\n with capture_internal_exceptions():\n span.set_tag(\"redis.is_cluster\", is_cluster)\n transaction = self.transaction if not is_cluster else False\n span.set_tag(\"redis.transaction\", transaction)\n\n commands = []\n for i, arg in enumerate(self.command_stack):\n if i > _MAX_NUM_ARGS:\n break\n command_args = []\n for j, command_arg in enumerate(get_command_args_fn(arg)):\n if j > 0:\n command_arg = repr(command_arg)\n command_args.append(command_arg)\n commands.append(\" \".join(command_args))\n\n span.set_data(\n \"redis.commands\",\n {\"count\": len(self.command_stack), \"first_ten\": commands},\n )\n\n return old_execute(self, *args, **kwargs)\n\n pipeline_cls.execute = sentry_patched_execute\n\n\ndef _get_redis_command_args(command):\n # type: (Any) -> Sequence[Any]\n return command[0]\n\n\ndef _parse_rediscluster_command(command):\n # type: (Any) -> Sequence[Any]\n return command.args\n\n\ndef _patch_rediscluster():\n # type: () -> None\n try:\n import rediscluster # type: ignore\n except ImportError:\n return\n\n patch_redis_client(rediscluster.RedisCluster, is_cluster=True)\n\n # up to v1.3.6, __version__ attribute is a tuple\n # from v2.0.0, __version__ is a string and VERSION a tuple\n version = getattr(rediscluster, \"VERSION\", rediscluster.__version__)\n\n # StrictRedisCluster was introduced in v0.2.0 and removed in v2.0.0\n # https://github.com/Grokzen/redis-py-cluster/blob/master/docs/release-notes.rst\n if (0, 2, 0) < version < (2, 0, 0):\n pipeline_cls = rediscluster.pipeline.StrictClusterPipeline\n patch_redis_client(rediscluster.StrictRedisCluster, is_cluster=True)\n else:\n pipeline_cls = rediscluster.pipeline.ClusterPipeline\n\n patch_redis_pipeline(pipeline_cls, True, _parse_rediscluster_command)\n\n\nclass RedisIntegration(Integration):\n identifier = \"redis\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n try:\n import redis\n except ImportError:\n raise DidNotEnable(\"Redis client not installed\")\n\n patch_redis_client(redis.StrictRedis, is_cluster=False)\n patch_redis_pipeline(redis.client.Pipeline, False, _get_redis_command_args)\n try:\n strict_pipeline = redis.client.StrictPipeline # type: ignore\n except AttributeError:\n pass\n else:\n patch_redis_pipeline(strict_pipeline, False, 
_get_redis_command_args)\n\n try:\n import rb.clients # type: ignore\n except ImportError:\n pass\n else:\n patch_redis_client(rb.clients.FanoutClient, is_cluster=False)\n patch_redis_client(rb.clients.MappingClient, is_cluster=False)\n patch_redis_client(rb.clients.RoutingClient, is_cluster=False)\n\n try:\n _patch_rediscluster()\n except Exception:\n logger.exception(\"Error occurred while patching `rediscluster` library\")\n\n\ndef patch_redis_client(cls, is_cluster):\n # type: (Any, bool) -> None\n \"\"\"\n This function can be used to instrument custom redis client classes or\n subclasses.\n \"\"\"\n old_execute_command = cls.execute_command\n\n def sentry_patched_execute_command(self, name, *args, **kwargs):\n # type: (Any, str, *Any, **Any) -> Any\n hub = Hub.current\n\n if hub.get_integration(RedisIntegration) is None:\n return old_execute_command(self, name, *args, **kwargs)\n\n description = name\n\n with capture_internal_exceptions():\n description_parts = [name]\n for i, arg in enumerate(args):\n if i > _MAX_NUM_ARGS:\n break\n\n description_parts.append(repr(arg))\n\n description = \" \".join(description_parts)\n\n with hub.start_span(op=OP.DB_REDIS, description=description) as span:\n span.set_tag(\"redis.is_cluster\", is_cluster)\n if name:\n span.set_tag(\"redis.command\", name)\n\n if name and args:\n name_low = name.lower()\n if (name_low in _SINGLE_KEY_COMMANDS) or (\n name_low in _MULTI_KEY_COMMANDS and len(args) == 1\n ):\n span.set_tag(\"redis.key\", args[0])\n\n return old_execute_command(self, name, *args, **kwargs)\n\n cls.execute_command = sentry_patched_execute_command\n", "path": "sentry_sdk/integrations/redis.py"}]}
| 2,321 | 729 |
gh_patches_debug_6195
|
rasdani/github-patches
|
git_diff
|
pystiche__pystiche-132
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create encoding preprocessors not until runtime
Right now the preprocessors are created at import:
https://github.com/pmeier/pystiche/blob/cad5ab6e9485680f2543cf4397d0d21e72a88b9e/pystiche/enc/preprocessing.py#L1-L4
We should only create them if they are needed to speed up the import.
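
The idea in miniature: store the classes in the registry and defer instantiation to the lookup call. The two classes below are cheap stand-ins for `TorchPreprocessing` and `CaffePreprocessing`, only there to make the snippet self-contained:

```python
class TorchPreprocessing:
    """Stand-in for pystiche.image.TorchPreprocessing."""

class CaffePreprocessing:
    """Stand-in for pystiche.image.CaffePreprocessing."""

# Importing the module stays cheap: only classes are stored, no instances.
PREPROCESSORS = {"torch": TorchPreprocessing, "caffe": CaffePreprocessing}

def get_preprocessor(framework: str):
    # The (potentially expensive) object is built only when it is requested.
    return PREPROCESSORS[framework]()

print(type(get_preprocessor("torch")).__name__)  # TorchPreprocessing
```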
</issue>
<code>
[start of pystiche/enc/preprocessing.py]
1 from torch import nn
2 from pystiche.image import TorchPreprocessing, CaffePreprocessing
3
4 PREPROCESSORS = {"torch": TorchPreprocessing(), "caffe": CaffePreprocessing()}
5
6 __all__ = ["get_preprocessor"]
7
8
9 def get_preprocessor(framework: str) -> nn.Module:
10 return PREPROCESSORS[framework]
11
[end of pystiche/enc/preprocessing.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pystiche/enc/preprocessing.py b/pystiche/enc/preprocessing.py
--- a/pystiche/enc/preprocessing.py
+++ b/pystiche/enc/preprocessing.py
@@ -1,10 +1,10 @@
from torch import nn
from pystiche.image import TorchPreprocessing, CaffePreprocessing
-PREPROCESSORS = {"torch": TorchPreprocessing(), "caffe": CaffePreprocessing()}
+PREPROCESSORS = {"torch": TorchPreprocessing, "caffe": CaffePreprocessing}
__all__ = ["get_preprocessor"]
def get_preprocessor(framework: str) -> nn.Module:
- return PREPROCESSORS[framework]
+ return PREPROCESSORS[framework]()
|
{"golden_diff": "diff --git a/pystiche/enc/preprocessing.py b/pystiche/enc/preprocessing.py\n--- a/pystiche/enc/preprocessing.py\n+++ b/pystiche/enc/preprocessing.py\n@@ -1,10 +1,10 @@\n from torch import nn\n from pystiche.image import TorchPreprocessing, CaffePreprocessing\n \n-PREPROCESSORS = {\"torch\": TorchPreprocessing(), \"caffe\": CaffePreprocessing()}\n+PREPROCESSORS = {\"torch\": TorchPreprocessing, \"caffe\": CaffePreprocessing}\n \n __all__ = [\"get_preprocessor\"]\n \n \n def get_preprocessor(framework: str) -> nn.Module:\n- return PREPROCESSORS[framework]\n+ return PREPROCESSORS[framework]()\n", "issue": "Create encoding preprocessors not until runtime\nRight now the preprocessors are created at import\r\n\r\nhttps://github.com/pmeier/pystiche/blob/cad5ab6e9485680f2543cf4397d0d21e72a88b9e/pystiche/enc/preprocessing.py#L1-L4\r\n\r\nWe should only create them if they are needed to speed up the import.\n", "before_files": [{"content": "from torch import nn\nfrom pystiche.image import TorchPreprocessing, CaffePreprocessing\n\nPREPROCESSORS = {\"torch\": TorchPreprocessing(), \"caffe\": CaffePreprocessing()}\n\n__all__ = [\"get_preprocessor\"]\n\n\ndef get_preprocessor(framework: str) -> nn.Module:\n return PREPROCESSORS[framework]\n", "path": "pystiche/enc/preprocessing.py"}]}
| 722 | 158 |
gh_patches_debug_5460
|
rasdani/github-patches
|
git_diff
|
CTFd__CTFd-1033
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Exporting always downloads the same zip
**Environment**:
- CTFd Version/Commit: 2.1.2
- Operating System: Standard docker-compose launch on Linux
- Web Browser and Version: Firefox 67.0.3
**What happened?**
When exporting the challenges from `/admin/config#backup`, the same zip downloads as the first export. It only contains the one challenge that was there when the backup was made, although there are around 30 challenges.
**What did you expect to happen?**
The export should download a zip file with all of the current challenges.
**How to reproduce your issue**
Export multiple times with different numbers of challenges.
</issue>
<code>
[start of CTFd/admin/__init__.py]
1 from flask import (
2 current_app as app,
3 render_template,
4 request,
5 redirect,
6 url_for,
7 Blueprint,
8 abort,
9 render_template_string,
10 send_file,
11 )
12
13 from CTFd.utils.decorators import admins_only
14 from CTFd.utils.user import is_admin
15 from CTFd.utils.security.auth import logout_user
16 from CTFd.utils import config as ctf_config, get_config, set_config
17 from CTFd.cache import cache, clear_config
18 from CTFd.utils.helpers import get_errors
19 from CTFd.utils.exports import (
20 export_ctf as export_ctf_util,
21 import_ctf as import_ctf_util,
22 )
23 from CTFd.models import (
24 db,
25 get_class_by_tablename,
26 Users,
27 Teams,
28 Configs,
29 Submissions,
30 Solves,
31 Awards,
32 Unlocks,
33 Tracking,
34 )
35 import datetime
36 import os
37 import six
38 import csv
39
40
41 admin = Blueprint("admin", __name__)
42
43
44 from CTFd.admin import challenges # noqa: F401
45 from CTFd.admin import pages # noqa: F401
46 from CTFd.admin import scoreboard # noqa: F401
47 from CTFd.admin import statistics # noqa: F401
48 from CTFd.admin import teams # noqa: F401
49 from CTFd.admin import users # noqa: F401
50 from CTFd.admin import submissions # noqa: F401
51 from CTFd.admin import notifications # noqa: F401
52
53
54 @admin.route("/admin", methods=["GET"])
55 def view():
56 if is_admin():
57 return redirect(url_for("admin.statistics"))
58 return redirect(url_for("auth.login"))
59
60
61 @admin.route("/admin/plugins/<plugin>", methods=["GET", "POST"])
62 @admins_only
63 def plugin(plugin):
64 if request.method == "GET":
65 plugins_path = os.path.join(app.root_path, "plugins")
66
67 config_html_plugins = [
68 name
69 for name in os.listdir(plugins_path)
70 if os.path.isfile(os.path.join(plugins_path, name, "config.html"))
71 ]
72
73 if plugin in config_html_plugins:
74 config_html = open(
75 os.path.join(app.root_path, "plugins", plugin, "config.html")
76 ).read()
77 return render_template_string(config_html)
78 abort(404)
79 elif request.method == "POST":
80 for k, v in request.form.items():
81 if k == "nonce":
82 continue
83 set_config(k, v)
84 with app.app_context():
85 clear_config()
86 return "1"
87
88
89 @admin.route("/admin/import", methods=["POST"])
90 @admins_only
91 def import_ctf():
92 backup = request.files["backup"]
93 errors = get_errors()
94 try:
95 import_ctf_util(backup)
96 except Exception as e:
97 print(e)
98 errors.append(repr(e))
99
100 if errors:
101 return errors[0], 500
102 else:
103 return redirect(url_for("admin.config"))
104
105
106 @admin.route("/admin/export", methods=["GET", "POST"])
107 @admins_only
108 def export_ctf():
109 backup = export_ctf_util()
110 ctf_name = ctf_config.ctf_name()
111 day = datetime.datetime.now().strftime("%Y-%m-%d")
112 full_name = u"{}.{}.zip".format(ctf_name, day)
113 return send_file(backup, as_attachment=True, attachment_filename=full_name)
114
115
116 @admin.route("/admin/export/csv")
117 @admins_only
118 def export_csv():
119 table = request.args.get("table")
120
121 # TODO: It might make sense to limit dumpable tables. Config could potentially leak sensitive information.
122 model = get_class_by_tablename(table)
123 if model is None:
124 abort(404)
125
126 output = six.StringIO()
127 writer = csv.writer(output)
128
129 header = [column.name for column in model.__mapper__.columns]
130 writer.writerow(header)
131
132 responses = model.query.all()
133
134 for curr in responses:
135 writer.writerow(
136 [getattr(curr, column.name) for column in model.__mapper__.columns]
137 )
138
139 output.seek(0)
140 return send_file(
141 output,
142 as_attachment=True,
143 cache_timeout=-1,
144 attachment_filename="{name}-{table}.csv".format(
145 name=ctf_config.ctf_name(), table=table
146 ),
147 )
148
149
150 @admin.route("/admin/config", methods=["GET", "POST"])
151 @admins_only
152 def config():
153 # Clear the config cache so that we don't get stale values
154 clear_config()
155
156 database_tables = sorted(db.metadata.tables.keys())
157
158 configs = Configs.query.all()
159 configs = dict([(c.key, get_config(c.key)) for c in configs])
160
161 themes = ctf_config.get_themes()
162 themes.remove(get_config("ctf_theme"))
163
164 return render_template(
165 "admin/config.html", database_tables=database_tables, themes=themes, **configs
166 )
167
168
169 @admin.route("/admin/reset", methods=["GET", "POST"])
170 @admins_only
171 def reset():
172 if request.method == "POST":
173 # Truncate Users, Teams, Submissions, Solves, Notifications, Awards, Unlocks, Tracking
174 Tracking.query.delete()
175 Solves.query.delete()
176 Submissions.query.delete()
177 Awards.query.delete()
178 Unlocks.query.delete()
179 Users.query.delete()
180 Teams.query.delete()
181 set_config("setup", False)
182 db.session.commit()
183 cache.clear()
184 logout_user()
185 db.session.close()
186 return redirect(url_for("views.setup"))
187
188 return render_template("admin/reset.html")
189
[end of CTFd/admin/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/CTFd/admin/__init__.py b/CTFd/admin/__init__.py
--- a/CTFd/admin/__init__.py
+++ b/CTFd/admin/__init__.py
@@ -110,7 +110,9 @@
ctf_name = ctf_config.ctf_name()
day = datetime.datetime.now().strftime("%Y-%m-%d")
full_name = u"{}.{}.zip".format(ctf_name, day)
- return send_file(backup, as_attachment=True, attachment_filename=full_name)
+ return send_file(
+ backup, cache_timeout=-1, as_attachment=True, attachment_filename=full_name
+ )
@admin.route("/admin/export/csv")
|
{"golden_diff": "diff --git a/CTFd/admin/__init__.py b/CTFd/admin/__init__.py\n--- a/CTFd/admin/__init__.py\n+++ b/CTFd/admin/__init__.py\n@@ -110,7 +110,9 @@\n ctf_name = ctf_config.ctf_name()\n day = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n full_name = u\"{}.{}.zip\".format(ctf_name, day)\n- return send_file(backup, as_attachment=True, attachment_filename=full_name)\n+ return send_file(\n+ backup, cache_timeout=-1, as_attachment=True, attachment_filename=full_name\n+ )\n \n \n @admin.route(\"/admin/export/csv\")\n", "issue": "Exporting always downloads the same zip\n**Environment**:\r\n\r\n - CTFd Version/Commit: 2.1.2\r\n - Operating System: Standard docker-compose launch on Linux\r\n - Web Browser and Version: Firefox 67.0.3\r\n\r\n**What happened?**\r\n\r\nWhen exporting the challenges from `/admin/config#backup`, the same zip downloads as the first export. It only contains the one challenge that was there when the backup was made, although there are around 30 challenges.\r\n\r\n**What did you expect to happen?**\r\n\r\nThe export should download a zip file with all of the current challenges.\r\n\r\n**How to reproduce your issue**\r\n\r\nExport multiple times with different numbers of challenges.\r\n\r\n\n", "before_files": [{"content": "from flask import (\n current_app as app,\n render_template,\n request,\n redirect,\n url_for,\n Blueprint,\n abort,\n render_template_string,\n send_file,\n)\n\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.user import is_admin\nfrom CTFd.utils.security.auth import logout_user\nfrom CTFd.utils import config as ctf_config, get_config, set_config\nfrom CTFd.cache import cache, clear_config\nfrom CTFd.utils.helpers import get_errors\nfrom CTFd.utils.exports import (\n export_ctf as export_ctf_util,\n import_ctf as import_ctf_util,\n)\nfrom CTFd.models import (\n db,\n get_class_by_tablename,\n Users,\n Teams,\n Configs,\n Submissions,\n Solves,\n Awards,\n Unlocks,\n Tracking,\n)\nimport datetime\nimport os\nimport six\nimport csv\n\n\nadmin = Blueprint(\"admin\", __name__)\n\n\nfrom CTFd.admin import challenges # noqa: F401\nfrom CTFd.admin import pages # noqa: F401\nfrom CTFd.admin import scoreboard # noqa: F401\nfrom CTFd.admin import statistics # noqa: F401\nfrom CTFd.admin import teams # noqa: F401\nfrom CTFd.admin import users # noqa: F401\nfrom CTFd.admin import submissions # noqa: F401\nfrom CTFd.admin import notifications # noqa: F401\n\n\[email protected](\"/admin\", methods=[\"GET\"])\ndef view():\n if is_admin():\n return redirect(url_for(\"admin.statistics\"))\n return redirect(url_for(\"auth.login\"))\n\n\[email protected](\"/admin/plugins/<plugin>\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef plugin(plugin):\n if request.method == \"GET\":\n plugins_path = os.path.join(app.root_path, \"plugins\")\n\n config_html_plugins = [\n name\n for name in os.listdir(plugins_path)\n if os.path.isfile(os.path.join(plugins_path, name, \"config.html\"))\n ]\n\n if plugin in config_html_plugins:\n config_html = open(\n os.path.join(app.root_path, \"plugins\", plugin, \"config.html\")\n ).read()\n return render_template_string(config_html)\n abort(404)\n elif request.method == \"POST\":\n for k, v in request.form.items():\n if k == \"nonce\":\n continue\n set_config(k, v)\n with app.app_context():\n clear_config()\n return \"1\"\n\n\[email protected](\"/admin/import\", methods=[\"POST\"])\n@admins_only\ndef import_ctf():\n backup = request.files[\"backup\"]\n errors = get_errors()\n try:\n import_ctf_util(backup)\n except 
Exception as e:\n print(e)\n errors.append(repr(e))\n\n if errors:\n return errors[0], 500\n else:\n return redirect(url_for(\"admin.config\"))\n\n\[email protected](\"/admin/export\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef export_ctf():\n backup = export_ctf_util()\n ctf_name = ctf_config.ctf_name()\n day = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n full_name = u\"{}.{}.zip\".format(ctf_name, day)\n return send_file(backup, as_attachment=True, attachment_filename=full_name)\n\n\[email protected](\"/admin/export/csv\")\n@admins_only\ndef export_csv():\n table = request.args.get(\"table\")\n\n # TODO: It might make sense to limit dumpable tables. Config could potentially leak sensitive information.\n model = get_class_by_tablename(table)\n if model is None:\n abort(404)\n\n output = six.StringIO()\n writer = csv.writer(output)\n\n header = [column.name for column in model.__mapper__.columns]\n writer.writerow(header)\n\n responses = model.query.all()\n\n for curr in responses:\n writer.writerow(\n [getattr(curr, column.name) for column in model.__mapper__.columns]\n )\n\n output.seek(0)\n return send_file(\n output,\n as_attachment=True,\n cache_timeout=-1,\n attachment_filename=\"{name}-{table}.csv\".format(\n name=ctf_config.ctf_name(), table=table\n ),\n )\n\n\[email protected](\"/admin/config\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef config():\n # Clear the config cache so that we don't get stale values\n clear_config()\n\n database_tables = sorted(db.metadata.tables.keys())\n\n configs = Configs.query.all()\n configs = dict([(c.key, get_config(c.key)) for c in configs])\n\n themes = ctf_config.get_themes()\n themes.remove(get_config(\"ctf_theme\"))\n\n return render_template(\n \"admin/config.html\", database_tables=database_tables, themes=themes, **configs\n )\n\n\[email protected](\"/admin/reset\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef reset():\n if request.method == \"POST\":\n # Truncate Users, Teams, Submissions, Solves, Notifications, Awards, Unlocks, Tracking\n Tracking.query.delete()\n Solves.query.delete()\n Submissions.query.delete()\n Awards.query.delete()\n Unlocks.query.delete()\n Users.query.delete()\n Teams.query.delete()\n set_config(\"setup\", False)\n db.session.commit()\n cache.clear()\n logout_user()\n db.session.close()\n return redirect(url_for(\"views.setup\"))\n\n return render_template(\"admin/reset.html\")\n", "path": "CTFd/admin/__init__.py"}]}
| 2,368 | 157 |
gh_patches_debug_1042
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-395
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
list_events url is inconsistent in API v2
The url is `/events/` whereas it should be `/events` to be consistent with other urls.
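
In flask-restplus terms the inconsistency is just the route string registered on the namespace. A tiny sketch of the two registrations (the resources mirror the ones in the file below, trimmed to the routing):

```python
from flask_restplus import Namespace, Resource

api = Namespace('events', description='Events')


@api.route('')                 # mounted as /events
class EventList(Resource):
    def get(self):
        return []


@api.route('/<int:event_id>')  # mounted as /events/<event_id>
class Event(Resource):
    def get(self, event_id):
        return {'id': event_id}
```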
</issue>
<code>
[start of open_event/api/events.py]
1 from flask.ext.restplus import Resource, Namespace, fields
2
3 from open_event.models.event import Event as EventModel
4 from .helpers import get_object_list, get_object_or_404
5
6 api = Namespace('events', description='Events')
7
8 EVENT = api.model('Event', {
9 'id': fields.Integer(required=True),
10 'name': fields.String,
11 'email': fields.String,
12 'color': fields.String,
13 'logo': fields.String,
14 'start_time': fields.DateTime,
15 'end_time': fields.DateTime,
16 'latitude': fields.Float,
17 'longitude': fields.Float,
18 'slogan': fields.String,
19 'url': fields.String,
20 'location_name': fields.String,
21 })
22
23
24 @api.route('/<int:event_id>')
25 @api.param('event_id')
26 @api.response(404, 'Event not found')
27 class Event(Resource):
28 @api.doc('get_event')
29 @api.marshal_with(EVENT)
30 def get(self, event_id):
31 """Fetch an event given its id"""
32 return get_object_or_404(EventModel, event_id)
33
34
35 @api.route('/')
36 class EventList(Resource):
37 @api.doc('list_events')
38 @api.marshal_list_with(EVENT)
39 def get(self):
40 """List all events"""
41 return get_object_list(EventModel)
42
[end of open_event/api/events.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/open_event/api/events.py b/open_event/api/events.py
--- a/open_event/api/events.py
+++ b/open_event/api/events.py
@@ -32,7 +32,7 @@
return get_object_or_404(EventModel, event_id)
[email protected]('/')
[email protected]('')
class EventList(Resource):
@api.doc('list_events')
@api.marshal_list_with(EVENT)
|
{"golden_diff": "diff --git a/open_event/api/events.py b/open_event/api/events.py\n--- a/open_event/api/events.py\n+++ b/open_event/api/events.py\n@@ -32,7 +32,7 @@\n return get_object_or_404(EventModel, event_id)\n \n \[email protected]('/')\[email protected]('')\n class EventList(Resource):\n @api.doc('list_events')\n @api.marshal_list_with(EVENT)\n", "issue": "list_events url is inconsistent in API v2\nThe url is `/events/` whereas it should be `/events` to be consistent with other urls. \n\n", "before_files": [{"content": "from flask.ext.restplus import Resource, Namespace, fields\n\nfrom open_event.models.event import Event as EventModel\nfrom .helpers import get_object_list, get_object_or_404\n\napi = Namespace('events', description='Events')\n\nEVENT = api.model('Event', {\n 'id': fields.Integer(required=True),\n 'name': fields.String,\n 'email': fields.String,\n 'color': fields.String,\n 'logo': fields.String,\n 'start_time': fields.DateTime,\n 'end_time': fields.DateTime,\n 'latitude': fields.Float,\n 'longitude': fields.Float,\n 'slogan': fields.String,\n 'url': fields.String,\n 'location_name': fields.String,\n})\n\n\[email protected]('/<int:event_id>')\[email protected]('event_id')\[email protected](404, 'Event not found')\nclass Event(Resource):\n @api.doc('get_event')\n @api.marshal_with(EVENT)\n def get(self, event_id):\n \"\"\"Fetch an event given its id\"\"\"\n return get_object_or_404(EventModel, event_id)\n\n\[email protected]('/')\nclass EventList(Resource):\n @api.doc('list_events')\n @api.marshal_list_with(EVENT)\n def get(self):\n \"\"\"List all events\"\"\"\n return get_object_list(EventModel)\n", "path": "open_event/api/events.py"}]}
| 923 | 92 |
gh_patches_debug_6894
|
rasdani/github-patches
|
git_diff
|
ray-project__ray-1662
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[DataFrame] Error checking on Pandas version
We need better reporting for issues with the Pandas version on a user's system.
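
A sketch of an import-time guard that reports the problem explicitly; `check_pandas_version` is a made-up helper, and the 0.22 threshold and message wording are illustrative:

```python
import pandas as pd

def check_pandas_version(minimum=(0, 22)):
    major, minor = (int(part) for part in pd.__version__.split(".")[:2])
    if (major, minor) < minimum:
        raise Exception(
            "In order to use Pandas on Ray, please upgrade your Pandas "
            "version to >= {}.{}.".format(*minimum)
        )

check_pandas_version()
```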
</issue>
<code>
[start of python/ray/dataframe/__init__.py]
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4 import threading
5
6 DEFAULT_NPARTITIONS = 4
7
8
9 def set_npartition_default(n):
10 global DEFAULT_NPARTITIONS
11 DEFAULT_NPARTITIONS = n
12
13
14 def get_npartitions():
15 return DEFAULT_NPARTITIONS
16
17
18 # We import these file after above two function
19 # because they depend on npartitions.
20 from .dataframe import DataFrame # noqa: 402
21 from .dataframe import from_pandas # noqa: 402
22 from .dataframe import to_pandas # noqa: 402
23 from .series import Series # noqa: 402
24 from .io import (read_csv, read_parquet) # noqa: 402
25
26 __all__ = [
27 "DataFrame", "from_pandas", "to_pandas", "Series", "read_csv",
28 "read_parquet"
29 ]
30
31 try:
32 if threading.current_thread().name == "MainThread":
33 import ray
34 ray.init()
35 except AssertionError:
36 pass
37
[end of python/ray/dataframe/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/python/ray/dataframe/__init__.py b/python/ray/dataframe/__init__.py
--- a/python/ray/dataframe/__init__.py
+++ b/python/ray/dataframe/__init__.py
@@ -1,8 +1,18 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+
+import pandas as pd
import threading
+pd_version = pd.__version__
+pd_major = int(pd_version.split(".")[0])
+pd_minor = int(pd_version.split(".")[1])
+
+if pd_major == 0 and pd_minor < 22:
+ raise Exception("In order to use Pandas on Ray, please upgrade your Pandas"
+ " version to >= 0.22.")
+
DEFAULT_NPARTITIONS = 4
|
{"golden_diff": "diff --git a/python/ray/dataframe/__init__.py b/python/ray/dataframe/__init__.py\n--- a/python/ray/dataframe/__init__.py\n+++ b/python/ray/dataframe/__init__.py\n@@ -1,8 +1,18 @@\n from __future__ import absolute_import\n from __future__ import division\n from __future__ import print_function\n+\n+import pandas as pd\n import threading\n \n+pd_version = pd.__version__\n+pd_major = int(pd_version.split(\".\")[0])\n+pd_minor = int(pd_version.split(\".\")[1])\n+\n+if pd_major == 0 and pd_minor < 22:\n+ raise Exception(\"In order to use Pandas on Ray, please upgrade your Pandas\"\n+ \" version to >= 0.22.\")\n+\n DEFAULT_NPARTITIONS = 4\n", "issue": "[DataFrame] Error checking on Pandas version\nWe need better reporting for issues with the Pandas version on a user's system. \n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport threading\n\nDEFAULT_NPARTITIONS = 4\n\n\ndef set_npartition_default(n):\n global DEFAULT_NPARTITIONS\n DEFAULT_NPARTITIONS = n\n\n\ndef get_npartitions():\n return DEFAULT_NPARTITIONS\n\n\n# We import these file after above two function\n# because they depend on npartitions.\nfrom .dataframe import DataFrame # noqa: 402\nfrom .dataframe import from_pandas # noqa: 402\nfrom .dataframe import to_pandas # noqa: 402\nfrom .series import Series # noqa: 402\nfrom .io import (read_csv, read_parquet) # noqa: 402\n\n__all__ = [\n \"DataFrame\", \"from_pandas\", \"to_pandas\", \"Series\", \"read_csv\",\n \"read_parquet\"\n]\n\ntry:\n if threading.current_thread().name == \"MainThread\":\n import ray\n ray.init()\nexcept AssertionError:\n pass\n", "path": "python/ray/dataframe/__init__.py"}]}
| 877 | 181 |
gh_patches_debug_10727
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-2712
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
react-clickoutside doesn't close the date-picker on clicking outside
Created via Reamaze:
Link: https://akvoo.reamaze.com/admin/conversations/bug-10
Assignee: Anthony Gonzalez
React-clickoutside needs to load before the date-picker loads
</issue>
<code>
[start of akvo/rsr/context_processors.py]
1 # -*- coding: utf-8 -*-
2 """
3 Akvo RSR is covered by the GNU Affero General Public License.
4
5 See more details in the license.txt file located at the root folder of the
6 Akvo RSR module. For additional details on the GNU license please see
7 < http://www.gnu.org/licenses/agpl.html >.
8 """
9
10 import django
11
12 from django.conf import settings
13 from django.core.exceptions import DisallowedHost
14 from django.contrib.sites.models import get_current_site
15
16
17 def extra_context(request, protocol="http"):
18 """Add information to the request context."""
19 try:
20 current_site = get_current_site(request)
21 except DisallowedHost:
22 current_site = None
23
24 django_version = django.get_version()
25 debug = getattr(settings, 'DEBUG', False)
26 deploy_tag = getattr(settings, 'DEPLOY_TAG', 'Unknown')
27 deploy_branch = getattr(settings, 'DEPLOY_BRANCH', 'Unknown')
28 deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')
29 deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown')
30
31 return dict(
32 current_site=current_site,
33 django_version=django_version,
34 debug=debug,
35 deploy_tag=deploy_tag,
36 deploy_branch=deploy_branch,
37 deploy_commit_id=deploy_commit_id,
38 deploy_commit_full_id=deploy_commit_full_id
39 )
40
41
42 def get_current_path_without_lang(request):
43 """Return current path without lang."""
44 path = request.get_full_path()
45 path_bits = path.split('/')
46 path = '/'.join(path_bits[2:])
47 return {'current_path_without_lang': path}
48
49
50 def extra_pages_context(request):
51 """Add context information of an RSR Page."""
52 if request.rsr_page:
53 page = request.rsr_page
54 return {
55 'rsr_page': page,
56 'favicon': page.favicon,
57 'logo': page.logo,
58 'organisation': page.organisation,
59 'return_url': page.return_url,
60 'return_url_text': page.custom_return_url_text,
61 'stylesheet': page.stylesheet,
62 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),
63 'domain_url': '//{}'.format(settings.RSR_DOMAIN),
64 'no_facebook': not page.facebook_button,
65 'facebook_app_id': page.facebook_app_id,
66 'no_twitter': not page.twitter_button,
67 }
68
69 return {}
70
[end of akvo/rsr/context_processors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/akvo/rsr/context_processors.py b/akvo/rsr/context_processors.py
--- a/akvo/rsr/context_processors.py
+++ b/akvo/rsr/context_processors.py
@@ -58,7 +58,7 @@
'organisation': page.organisation,
'return_url': page.return_url,
'return_url_text': page.custom_return_url_text,
- 'stylesheet': page.stylesheet,
+ 'page_stylesheet': page.stylesheet,
'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),
'domain_url': '//{}'.format(settings.RSR_DOMAIN),
'no_facebook': not page.facebook_button,
|
{"golden_diff": "diff --git a/akvo/rsr/context_processors.py b/akvo/rsr/context_processors.py\n--- a/akvo/rsr/context_processors.py\n+++ b/akvo/rsr/context_processors.py\n@@ -58,7 +58,7 @@\n 'organisation': page.organisation,\n 'return_url': page.return_url,\n 'return_url_text': page.custom_return_url_text,\n- 'stylesheet': page.stylesheet,\n+ 'page_stylesheet': page.stylesheet,\n 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),\n 'domain_url': '//{}'.format(settings.RSR_DOMAIN),\n 'no_facebook': not page.facebook_button,\n", "issue": "react-clickoutside doesn't close the date-picker on clicking outside\nCreated via Reamaze:\r\n\r\nLink: https://akvoo.reamaze.com/admin/conversations/bug-10\r\nAssignee: Anthony Gonzalez\r\n\r\nReact-clickoutside needs to load before the date-picker loads\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nAkvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. For additional details on the GNU license please see\n< http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nimport django\n\nfrom django.conf import settings\nfrom django.core.exceptions import DisallowedHost\nfrom django.contrib.sites.models import get_current_site\n\n\ndef extra_context(request, protocol=\"http\"):\n \"\"\"Add information to the request context.\"\"\"\n try:\n current_site = get_current_site(request)\n except DisallowedHost:\n current_site = None\n\n django_version = django.get_version()\n debug = getattr(settings, 'DEBUG', False)\n deploy_tag = getattr(settings, 'DEPLOY_TAG', 'Unknown')\n deploy_branch = getattr(settings, 'DEPLOY_BRANCH', 'Unknown')\n deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')\n deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown')\n\n return dict(\n current_site=current_site,\n django_version=django_version,\n debug=debug,\n deploy_tag=deploy_tag,\n deploy_branch=deploy_branch,\n deploy_commit_id=deploy_commit_id,\n deploy_commit_full_id=deploy_commit_full_id\n )\n\n\ndef get_current_path_without_lang(request):\n \"\"\"Return current path without lang.\"\"\"\n path = request.get_full_path()\n path_bits = path.split('/')\n path = '/'.join(path_bits[2:])\n return {'current_path_without_lang': path}\n\n\ndef extra_pages_context(request):\n \"\"\"Add context information of an RSR Page.\"\"\"\n if request.rsr_page:\n page = request.rsr_page\n return {\n 'rsr_page': page,\n 'favicon': page.favicon,\n 'logo': page.logo,\n 'organisation': page.organisation,\n 'return_url': page.return_url,\n 'return_url_text': page.custom_return_url_text,\n 'stylesheet': page.stylesheet,\n 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),\n 'domain_url': '//{}'.format(settings.RSR_DOMAIN),\n 'no_facebook': not page.facebook_button,\n 'facebook_app_id': page.facebook_app_id,\n 'no_twitter': not page.twitter_button,\n }\n\n return {}\n", "path": "akvo/rsr/context_processors.py"}]}
| 1,236 | 145 |
gh_patches_debug_9837
|
rasdani/github-patches
|
git_diff
|
StackStorm__st2-2489
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
action alias regex fails to evaluate null optional arguments when type is not string
For example, something like: `update {{hostname}}( {{count}} times)?`
Works for `!update myhost 2 times`
Fails for `!update myhost`
Error: `(invalid literal for int() with base 10: '')`
So the workaround is to change the type of `count` from integer to string.
Or @emedvedev provides another workaround:
```
- update {{ hostname }} {{ count }} times
- update {{ hostname }}
```
Start from the most explicit.
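
The underlying failure is easy to reproduce in isolation: when the optional group does not participate in the match, the parameter falls back to an empty string, and the later cast to `int` raises exactly this error. The pattern below is a simplified stand-in for the generated alias regex, not st2's actual parser:

```python
import re

pattern = r"^update\s+(?P<hostname>\S+)(?:\s+(?P<count>\S+)\s+times)?$"
values = re.match(pattern, "update myhost").groupdict()
params = {key: value or "" for key, value in values.items()}
print(params)  # {'hostname': 'myhost', 'count': ''}

try:
    int(params["count"])
except ValueError as exc:
    print(exc)  # invalid literal for int() with base 10: ''
```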
</issue>
<code>
[start of st2common/st2common/models/utils/action_alias_utils.py]
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import re
17 from st2common.exceptions import content
18
19 __all__ = [
20 'ActionAliasFormatParser'
21 ]
22
23
24 class ActionAliasFormatParser(object):
25
26 def __init__(self, alias_format=None, param_stream=None):
27 self._format = alias_format or ''
28 self._param_stream = param_stream or ''
29
30 def get_extracted_param_value(self):
31
32 result = {}
33
34 # As there's a lot of questions about using regular expressions,
35 # I'll try to be thorough when documenting this code.
36
37 # We're parsing the arbitrary key-value pairs at the end of the stream
38 # to support passing of parameters not specified in the format string,
39 # and cutting them from the stream as they're no longer needed.
40 # Possible values are quoted strings, a word, or anything inside "{}".
41 pairs_match = r'(?:^|\s+)(\S+)=("(.*?)"|\'(.*?)\'|({.*?})|(\S+))'
42 extra = re.match(r'.*?((' + pairs_match + r'\s*)*)$',
43 self._param_stream, re.DOTALL)
44 if extra:
45 kv_pairs = re.findall(pairs_match,
46 extra.group(1), re.DOTALL)
47 self._param_stream = self._param_stream.replace(extra.group(1), '')
48 self._param_stream = " %s " % self._param_stream
49
50 # Now we'll match parameters with default values in form of
51 # {{ value = parameter }} (and all possible permutations of spaces),
52 # compiling them into a list.
53 # "test {{ url = http://google.com }} {{ extra = Test }}" will become
54 # [ ["url", "http://google.com"], ["extra", "Test"] ]
55 params = re.findall(r'{{\s*(.+?)\s*(?:=\s*[\'"]?({.+?}|.+?)[\'"]?)?\s*}}',
56 self._format, re.DOTALL)
57
58 # Now we're transforming our format string into a regular expression,
59 # substituting {{ ... }} with regex named groups, so that param_stream
60 # matched against this expression yields a dict of params with values.
61 param_match = r'["\']?(?P<\2>(?:(?<=\').+?(?=\')|(?<=").+?(?=")|{.+?}|.+?))["\']?'
62 reg = re.sub(r'(\s*){{\s*([^=}]+?)\s*}}(?![\'"]?\s+}})',
63 r'\1' + param_match,
64 self._format)
65 reg = re.sub(r'(\s*){{\s*(\S+)\s*=\s*(?:{.+?}|.+?)\s*}}',
66 r'(?:\1' + param_match + r')?',
67 reg)
68 reg = re.sub(r'(\s*){{\s*(.+?)\s*}}',
69 r'\1' + param_match,
70 reg)
71 reg = '^\s*' + reg + r'\s*$'
72
73 # Now we're matching param_stream against our format string regex,
74 # getting a dict of values. We'll also get default values from
75 # "params" list if something is not present.
76 # Priority, from lowest to highest:
77 # 1. Default parameters
78 # 2. Matched parameters
79 # 3. Extra parameters
80 matched_stream = re.match(reg, self._param_stream, re.DOTALL)
81 if matched_stream:
82 values = matched_stream.groupdict()
83 for param in params:
84 matched_value = values[param[0]] if matched_stream else None
85 result[param[0]] = matched_value or param[1]
86 if extra:
87 for pair in kv_pairs:
88 result[pair[0]] = ''.join(pair[2:])
89
90 if self._format and not (self._param_stream.strip() or any(result.values())):
91 raise content.ParseException('No value supplied and no default value found.')
92
93 return result
94
[end of st2common/st2common/models/utils/action_alias_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/st2common/st2common/models/utils/action_alias_utils.py b/st2common/st2common/models/utils/action_alias_utils.py
--- a/st2common/st2common/models/utils/action_alias_utils.py
+++ b/st2common/st2common/models/utils/action_alias_utils.py
@@ -82,7 +82,9 @@
values = matched_stream.groupdict()
for param in params:
matched_value = values[param[0]] if matched_stream else None
- result[param[0]] = matched_value or param[1]
+ matched_result = matched_value or param[1]
+ if matched_result:
+ result[param[0]] = matched_result
if extra:
for pair in kv_pairs:
result[pair[0]] = ''.join(pair[2:])
|
{"golden_diff": "diff --git a/st2common/st2common/models/utils/action_alias_utils.py b/st2common/st2common/models/utils/action_alias_utils.py\n--- a/st2common/st2common/models/utils/action_alias_utils.py\n+++ b/st2common/st2common/models/utils/action_alias_utils.py\n@@ -82,7 +82,9 @@\n values = matched_stream.groupdict()\n for param in params:\n matched_value = values[param[0]] if matched_stream else None\n- result[param[0]] = matched_value or param[1]\n+ matched_result = matched_value or param[1]\n+ if matched_result:\n+ result[param[0]] = matched_result\n if extra:\n for pair in kv_pairs:\n result[pair[0]] = ''.join(pair[2:])\n", "issue": "action alias regex fails to evaluate null optional arguments when type is not string\nFor example, something like: `update {{hostname}}( {{count}} times)?` \nWorks for `!update myhost 2 times`\nFails for `!update myhost`\nError: `(invalid literal for int() with base 10: '')`\nSo the workaround is to change the type of `count` from integer to string.\nOr @emedvedev provides another workaround:\n\n```\n- update {{ hostname }} {{ count }} times\n- update {{ hostname }}\n```\n\nStart from the most explicit.\n\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\nfrom st2common.exceptions import content\n\n__all__ = [\n 'ActionAliasFormatParser'\n]\n\n\nclass ActionAliasFormatParser(object):\n\n def __init__(self, alias_format=None, param_stream=None):\n self._format = alias_format or ''\n self._param_stream = param_stream or ''\n\n def get_extracted_param_value(self):\n\n result = {}\n\n # As there's a lot of questions about using regular expressions,\n # I'll try to be thorough when documenting this code.\n\n # We're parsing the arbitrary key-value pairs at the end of the stream\n # to support passing of parameters not specified in the format string,\n # and cutting them from the stream as they're no longer needed.\n # Possible values are quoted strings, a word, or anything inside \"{}\".\n pairs_match = r'(?:^|\\s+)(\\S+)=(\"(.*?)\"|\\'(.*?)\\'|({.*?})|(\\S+))'\n extra = re.match(r'.*?((' + pairs_match + r'\\s*)*)$',\n self._param_stream, re.DOTALL)\n if extra:\n kv_pairs = re.findall(pairs_match,\n extra.group(1), re.DOTALL)\n self._param_stream = self._param_stream.replace(extra.group(1), '')\n self._param_stream = \" %s \" % self._param_stream\n\n # Now we'll match parameters with default values in form of\n # {{ value = parameter }} (and all possible permutations of spaces),\n # compiling them into a list.\n # \"test {{ url = http://google.com }} {{ extra = Test }}\" will become\n # [ [\"url\", \"http://google.com\"], [\"extra\", \"Test\"] ]\n params = re.findall(r'{{\\s*(.+?)\\s*(?:=\\s*[\\'\"]?({.+?}|.+?)[\\'\"]?)?\\s*}}',\n self._format, re.DOTALL)\n\n # Now we're transforming 
our format string into a regular expression,\n # substituting {{ ... }} with regex named groups, so that param_stream\n # matched against this expression yields a dict of params with values.\n param_match = r'[\"\\']?(?P<\\2>(?:(?<=\\').+?(?=\\')|(?<=\").+?(?=\")|{.+?}|.+?))[\"\\']?'\n reg = re.sub(r'(\\s*){{\\s*([^=}]+?)\\s*}}(?![\\'\"]?\\s+}})',\n r'\\1' + param_match,\n self._format)\n reg = re.sub(r'(\\s*){{\\s*(\\S+)\\s*=\\s*(?:{.+?}|.+?)\\s*}}',\n r'(?:\\1' + param_match + r')?',\n reg)\n reg = re.sub(r'(\\s*){{\\s*(.+?)\\s*}}',\n r'\\1' + param_match,\n reg)\n reg = '^\\s*' + reg + r'\\s*$'\n\n # Now we're matching param_stream against our format string regex,\n # getting a dict of values. We'll also get default values from\n # \"params\" list if something is not present.\n # Priority, from lowest to highest:\n # 1. Default parameters\n # 2. Matched parameters\n # 3. Extra parameters\n matched_stream = re.match(reg, self._param_stream, re.DOTALL)\n if matched_stream:\n values = matched_stream.groupdict()\n for param in params:\n matched_value = values[param[0]] if matched_stream else None\n result[param[0]] = matched_value or param[1]\n if extra:\n for pair in kv_pairs:\n result[pair[0]] = ''.join(pair[2:])\n\n if self._format and not (self._param_stream.strip() or any(result.values())):\n raise content.ParseException('No value supplied and no default value found.')\n\n return result\n", "path": "st2common/st2common/models/utils/action_alias_utils.py"}]}
| 1,891 | 170 |
gh_patches_debug_31347
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-2641
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
dotnet install fails for prefixed packages
### search you tried in the issue tracker
dotnet tool
### describe your issue
A bit of an oversight when constructing `tool_name` here:
https://github.com/pre-commit/pre-commit/blob/cb0bcfd67fc35e91f7b2eca7e33bceda459dca77/pre_commit/languages/dotnet.py#L60-L63
E.g.
```console
$ pre-commit try-repo https://github.com/rkm/sample-dotnet-tool
[INFO] Initializing environment for https://github.com/rkm/sample-dotnet-tool.
===============================================================================
Using config:
===============================================================================
repos:
- repo: https://github.com/rkm/sample-dotnet-tool
rev: e53a3601bc06bb038dac30da813572291dd8d58f
hooks:
- id: sample-dotnet-tool
===============================================================================
[INFO] Installing environment for https://github.com/rkm/sample-dotnet-tool.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
An unexpected error has occurred: CalledProcessError: command: ('/home/rkm/bin/dotnet', 'tool', 'install', '--tool-path', '/tmp/tmp6bk4v26x/repotefhurdg/dotnetenv-default/bin', '--add-source', 'pre-commit-build', 'Rkm')
return code: 1
expected return code: 0
stdout:
/tmp/1873db78-d0a7-48ba-bbff-10a7ef85a2a6/restore.csproj : error NU1101: Unable to find package rkm. No packages exist with this id in source(s): /tmp/tmp6bk4v26x/repotefhurdg/pre-commit-build, nuget.org
stderr:
The tool package could not be restored.
Tool 'rkm' failed to install. This failure may have been caused by:
* You are attempting to install a preview release and did not use the --version option to specify the version.
* A package by this name was found, but it was not a .NET tool.
* The required NuGet feed cannot be accessed, perhaps because of an Internet connection problem.
* You mistyped the name of the tool.
For more reasons, including package naming enforcement, visit https://aka.ms/failure-installing-tool
Check the log at /home/rkm/.cache/pre-commit/pre-commit.log
```
### pre-commit --version
pre-commit 2.20.0
### .pre-commit-config.yaml
```yaml
repos:
- repo: https://github.com/rkm/sample-dotnet-tool
rev: e53a3601bc06bb038dac30da813572291dd8d58f
hooks:
- id: sample-dotnet-tool
```
### ~/.cache/pre-commit/pre-commit.log (if present)
_No response_
</issue>
<code>
[start of pre_commit/languages/dotnet.py]
1 from __future__ import annotations
2
3 import contextlib
4 import os.path
5 from typing import Generator
6 from typing import Sequence
7
8 import pre_commit.constants as C
9 from pre_commit.envcontext import envcontext
10 from pre_commit.envcontext import PatchesT
11 from pre_commit.envcontext import Var
12 from pre_commit.hook import Hook
13 from pre_commit.languages import helpers
14 from pre_commit.prefix import Prefix
15 from pre_commit.util import clean_path_on_failure
16
17 ENVIRONMENT_DIR = 'dotnetenv'
18 BIN_DIR = 'bin'
19
20 get_default_version = helpers.basic_get_default_version
21 health_check = helpers.basic_health_check
22
23
24 def get_env_patch(venv: str) -> PatchesT:
25 return (
26 ('PATH', (os.path.join(venv, BIN_DIR), os.pathsep, Var('PATH'))),
27 )
28
29
30 @contextlib.contextmanager
31 def in_env(prefix: Prefix) -> Generator[None, None, None]:
32 directory = helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT)
33 envdir = prefix.path(directory)
34 with envcontext(get_env_patch(envdir)):
35 yield
36
37
38 def install_environment(
39 prefix: Prefix,
40 version: str,
41 additional_dependencies: Sequence[str],
42 ) -> None:
43 helpers.assert_version_default('dotnet', version)
44 helpers.assert_no_additional_deps('dotnet', additional_dependencies)
45
46 envdir = prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, version))
47 with clean_path_on_failure(envdir):
48 build_dir = 'pre-commit-build'
49
50 # Build & pack nupkg file
51 helpers.run_setup_cmd(
52 prefix,
53 (
54 'dotnet', 'pack',
55 '--configuration', 'Release',
56 '--output', build_dir,
57 ),
58 )
59
60 # Determine tool from the packaged file <tool_name>.<version>.nupkg
61 build_outputs = os.listdir(os.path.join(prefix.prefix_dir, build_dir))
62 for output in build_outputs:
63 tool_name = output.split('.')[0]
64
65 # Install to bin dir
66 helpers.run_setup_cmd(
67 prefix,
68 (
69 'dotnet', 'tool', 'install',
70 '--tool-path', os.path.join(envdir, BIN_DIR),
71 '--add-source', build_dir,
72 tool_name,
73 ),
74 )
75
76 # Clean the git dir, ignoring the environment dir
77 clean_cmd = ('git', 'clean', '-ffxd', '-e', f'{ENVIRONMENT_DIR}-*')
78 helpers.run_setup_cmd(prefix, clean_cmd)
79
80
81 def run_hook(
82 hook: Hook,
83 file_args: Sequence[str],
84 color: bool,
85 ) -> tuple[int, bytes]:
86 with in_env(hook.prefix):
87 return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
88
[end of pre_commit/languages/dotnet.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pre_commit/languages/dotnet.py b/pre_commit/languages/dotnet.py
--- a/pre_commit/languages/dotnet.py
+++ b/pre_commit/languages/dotnet.py
@@ -2,6 +2,9 @@
import contextlib
import os.path
+import re
+import xml.etree.ElementTree
+import zipfile
from typing import Generator
from typing import Sequence
@@ -57,10 +60,29 @@
),
)
- # Determine tool from the packaged file <tool_name>.<version>.nupkg
- build_outputs = os.listdir(os.path.join(prefix.prefix_dir, build_dir))
- for output in build_outputs:
- tool_name = output.split('.')[0]
+ nupkg_dir = prefix.path(build_dir)
+ nupkgs = [x for x in os.listdir(nupkg_dir) if x.endswith('.nupkg')]
+
+ if not nupkgs:
+ raise AssertionError('could not find any build outputs to install')
+
+ for nupkg in nupkgs:
+ with zipfile.ZipFile(os.path.join(nupkg_dir, nupkg)) as f:
+ nuspec, = (x for x in f.namelist() if x.endswith('.nuspec'))
+ with f.open(nuspec) as spec:
+ tree = xml.etree.ElementTree.parse(spec)
+
+ namespace = re.match(r'{.*}', tree.getroot().tag)
+ if not namespace:
+ raise AssertionError('could not parse namespace from nuspec')
+
+ tool_id_element = tree.find(f'.//{namespace[0]}id')
+ if tool_id_element is None:
+ raise AssertionError('expected to find an "id" element')
+
+ tool_id = tool_id_element.text
+ if not tool_id:
+ raise AssertionError('"id" element missing tool name')
# Install to bin dir
helpers.run_setup_cmd(
@@ -69,7 +91,7 @@
'dotnet', 'tool', 'install',
'--tool-path', os.path.join(envdir, BIN_DIR),
'--add-source', build_dir,
- tool_name,
+ tool_id,
),
)
|
{"golden_diff": "diff --git a/pre_commit/languages/dotnet.py b/pre_commit/languages/dotnet.py\n--- a/pre_commit/languages/dotnet.py\n+++ b/pre_commit/languages/dotnet.py\n@@ -2,6 +2,9 @@\n \n import contextlib\n import os.path\n+import re\n+import xml.etree.ElementTree\n+import zipfile\n from typing import Generator\n from typing import Sequence\n \n@@ -57,10 +60,29 @@\n ),\n )\n \n- # Determine tool from the packaged file <tool_name>.<version>.nupkg\n- build_outputs = os.listdir(os.path.join(prefix.prefix_dir, build_dir))\n- for output in build_outputs:\n- tool_name = output.split('.')[0]\n+ nupkg_dir = prefix.path(build_dir)\n+ nupkgs = [x for x in os.listdir(nupkg_dir) if x.endswith('.nupkg')]\n+\n+ if not nupkgs:\n+ raise AssertionError('could not find any build outputs to install')\n+\n+ for nupkg in nupkgs:\n+ with zipfile.ZipFile(os.path.join(nupkg_dir, nupkg)) as f:\n+ nuspec, = (x for x in f.namelist() if x.endswith('.nuspec'))\n+ with f.open(nuspec) as spec:\n+ tree = xml.etree.ElementTree.parse(spec)\n+\n+ namespace = re.match(r'{.*}', tree.getroot().tag)\n+ if not namespace:\n+ raise AssertionError('could not parse namespace from nuspec')\n+\n+ tool_id_element = tree.find(f'.//{namespace[0]}id')\n+ if tool_id_element is None:\n+ raise AssertionError('expected to find an \"id\" element')\n+\n+ tool_id = tool_id_element.text\n+ if not tool_id:\n+ raise AssertionError('\"id\" element missing tool name')\n \n # Install to bin dir\n helpers.run_setup_cmd(\n@@ -69,7 +91,7 @@\n 'dotnet', 'tool', 'install',\n '--tool-path', os.path.join(envdir, BIN_DIR),\n '--add-source', build_dir,\n- tool_name,\n+ tool_id,\n ),\n )\n", "issue": "dotnet install fails for prefixed packages\n### search you tried in the issue tracker\n\ndotnet tool\n\n### describe your issue\n\nA bit of an oversight when constructing `tool_name` here:\r\n\r\nhttps://github.com/pre-commit/pre-commit/blob/cb0bcfd67fc35e91f7b2eca7e33bceda459dca77/pre_commit/languages/dotnet.py#L60-L63\r\n\r\nE.g.\r\n\r\n```console\r\n$ pre-commit try-repo https://github.com/rkm/sample-dotnet-tool\r\n[INFO] Initializing environment for https://github.com/rkm/sample-dotnet-tool.\r\n===============================================================================\r\nUsing config:\r\n===============================================================================\r\nrepos:\r\n- repo: https://github.com/rkm/sample-dotnet-tool\r\n rev: e53a3601bc06bb038dac30da813572291dd8d58f\r\n hooks:\r\n - id: sample-dotnet-tool\r\n===============================================================================\r\n[INFO] Installing environment for https://github.com/rkm/sample-dotnet-tool.\r\n[INFO] Once installed this environment will be reused.\r\n[INFO] This may take a few minutes...\r\nAn unexpected error has occurred: CalledProcessError: command: ('/home/rkm/bin/dotnet', 'tool', 'install', '--tool-path', '/tmp/tmp6bk4v26x/repotefhurdg/dotnetenv-default/bin', '--add-source', 'pre-commit-build', 'Rkm')\r\nreturn code: 1\r\nexpected return code: 0\r\nstdout:\r\n /tmp/1873db78-d0a7-48ba-bbff-10a7ef85a2a6/restore.csproj : error NU1101: Unable to find package rkm. No packages exist with this id in source(s): /tmp/tmp6bk4v26x/repotefhurdg/pre-commit-build, nuget.org\r\n\r\nstderr:\r\n The tool package could not be restored.\r\n Tool 'rkm' failed to install. 
This failure may have been caused by:\r\n\r\n * You are attempting to install a preview release and did not use the --version option to specify the version.\r\n * A package by this name was found, but it was not a .NET tool.\r\n * The required NuGet feed cannot be accessed, perhaps because of an Internet connection problem.\r\n * You mistyped the name of the tool.\r\n\r\n For more reasons, including package naming enforcement, visit https://aka.ms/failure-installing-tool\r\n\r\nCheck the log at /home/rkm/.cache/pre-commit/pre-commit.log\r\n```\n\n### pre-commit --version\n\npre-commit 2.20.0\n\n### .pre-commit-config.yaml\n\n```yaml\nrepos:\r\n- repo: https://github.com/rkm/sample-dotnet-tool\r\n rev: e53a3601bc06bb038dac30da813572291dd8d58f\r\n hooks:\r\n - id: sample-dotnet-tool\n```\n\n\n### ~/.cache/pre-commit/pre-commit.log (if present)\n\n_No response_\n", "before_files": [{"content": "from __future__ import annotations\n\nimport contextlib\nimport os.path\nfrom typing import Generator\nfrom typing import Sequence\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import clean_path_on_failure\n\nENVIRONMENT_DIR = 'dotnetenv'\nBIN_DIR = 'bin'\n\nget_default_version = helpers.basic_get_default_version\nhealth_check = helpers.basic_health_check\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n return (\n ('PATH', (os.path.join(venv, BIN_DIR), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(prefix: Prefix) -> Generator[None, None, None]:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT)\n envdir = prefix.path(directory)\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef install_environment(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n) -> None:\n helpers.assert_version_default('dotnet', version)\n helpers.assert_no_additional_deps('dotnet', additional_dependencies)\n\n envdir = prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, version))\n with clean_path_on_failure(envdir):\n build_dir = 'pre-commit-build'\n\n # Build & pack nupkg file\n helpers.run_setup_cmd(\n prefix,\n (\n 'dotnet', 'pack',\n '--configuration', 'Release',\n '--output', build_dir,\n ),\n )\n\n # Determine tool from the packaged file <tool_name>.<version>.nupkg\n build_outputs = os.listdir(os.path.join(prefix.prefix_dir, build_dir))\n for output in build_outputs:\n tool_name = output.split('.')[0]\n\n # Install to bin dir\n helpers.run_setup_cmd(\n prefix,\n (\n 'dotnet', 'tool', 'install',\n '--tool-path', os.path.join(envdir, BIN_DIR),\n '--add-source', build_dir,\n tool_name,\n ),\n )\n\n # Clean the git dir, ignoring the environment dir\n clean_cmd = ('git', 'clean', '-ffxd', '-e', f'{ENVIRONMENT_DIR}-*')\n helpers.run_setup_cmd(prefix, clean_cmd)\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> tuple[int, bytes]:\n with in_env(hook.prefix):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/dotnet.py"}]}
| 1,968 | 491 |
gh_patches_debug_755
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__torchmetrics-1384
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
module 'torchmetrics.classification' has no attribute 'AUC'
## 🐛 Bug
Importing all the classification metrics causes the `AttributeError`:
`from torchmetrics.classification import *`
`AttributeError: module 'torchmetrics.classification' has no attribute 'AUC'`
Environment
torchmetrics 0.11.0
pytorch 1.13.0
In order to fix it someone should remove AUC from the list __all__ (src/torchmetrics/classification/__init__.py)
</issue>
<code>
[start of src/torchmetrics/classification/__init__.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from torchmetrics.classification.confusion_matrix import ( # isort:skip
15 BinaryConfusionMatrix,
16 ConfusionMatrix,
17 MulticlassConfusionMatrix,
18 MultilabelConfusionMatrix,
19 )
20 from torchmetrics.classification.precision_recall_curve import ( # isort:skip
21 PrecisionRecallCurve,
22 BinaryPrecisionRecallCurve,
23 MulticlassPrecisionRecallCurve,
24 MultilabelPrecisionRecallCurve,
25 )
26 from torchmetrics.classification.stat_scores import ( # isort:skip
27 BinaryStatScores,
28 MulticlassStatScores,
29 MultilabelStatScores,
30 StatScores,
31 )
32 from torchmetrics.classification.accuracy import Accuracy, BinaryAccuracy, MulticlassAccuracy, MultilabelAccuracy
33 from torchmetrics.classification.auroc import AUROC, BinaryAUROC, MulticlassAUROC, MultilabelAUROC
34 from torchmetrics.classification.average_precision import (
35 AveragePrecision,
36 BinaryAveragePrecision,
37 MulticlassAveragePrecision,
38 MultilabelAveragePrecision,
39 )
40 from torchmetrics.classification.calibration_error import (
41 BinaryCalibrationError,
42 CalibrationError,
43 MulticlassCalibrationError,
44 )
45 from torchmetrics.classification.cohen_kappa import BinaryCohenKappa, CohenKappa, MulticlassCohenKappa
46 from torchmetrics.classification.dice import Dice
47 from torchmetrics.classification.exact_match import ExactMatch, MulticlassExactMatch, MultilabelExactMatch
48 from torchmetrics.classification.f_beta import (
49 BinaryF1Score,
50 BinaryFBetaScore,
51 F1Score,
52 FBetaScore,
53 MulticlassF1Score,
54 MulticlassFBetaScore,
55 MultilabelF1Score,
56 MultilabelFBetaScore,
57 )
58 from torchmetrics.classification.hamming import (
59 BinaryHammingDistance,
60 HammingDistance,
61 MulticlassHammingDistance,
62 MultilabelHammingDistance,
63 )
64 from torchmetrics.classification.hinge import BinaryHingeLoss, HingeLoss, MulticlassHingeLoss
65 from torchmetrics.classification.jaccard import (
66 BinaryJaccardIndex,
67 JaccardIndex,
68 MulticlassJaccardIndex,
69 MultilabelJaccardIndex,
70 )
71 from torchmetrics.classification.matthews_corrcoef import (
72 BinaryMatthewsCorrCoef,
73 MatthewsCorrCoef,
74 MulticlassMatthewsCorrCoef,
75 MultilabelMatthewsCorrCoef,
76 )
77 from torchmetrics.classification.precision_recall import (
78 BinaryPrecision,
79 BinaryRecall,
80 MulticlassPrecision,
81 MulticlassRecall,
82 MultilabelPrecision,
83 MultilabelRecall,
84 Precision,
85 Recall,
86 )
87 from torchmetrics.classification.ranking import (
88 MultilabelCoverageError,
89 MultilabelRankingAveragePrecision,
90 MultilabelRankingLoss,
91 )
92 from torchmetrics.classification.recall_at_fixed_precision import (
93 BinaryRecallAtFixedPrecision,
94 MulticlassRecallAtFixedPrecision,
95 MultilabelRecallAtFixedPrecision,
96 )
97 from torchmetrics.classification.roc import ROC, BinaryROC, MulticlassROC, MultilabelROC
98 from torchmetrics.classification.specificity import (
99 BinarySpecificity,
100 MulticlassSpecificity,
101 MultilabelSpecificity,
102 Specificity,
103 )
104
105 __all__ = [
106 "BinaryConfusionMatrix",
107 "ConfusionMatrix",
108 "MulticlassConfusionMatrix",
109 "MultilabelConfusionMatrix",
110 "PrecisionRecallCurve",
111 "BinaryPrecisionRecallCurve",
112 "MulticlassPrecisionRecallCurve",
113 "MultilabelPrecisionRecallCurve",
114 "BinaryStatScores",
115 "MulticlassStatScores",
116 "MultilabelStatScores",
117 "StatScores",
118 "Accuracy",
119 "BinaryAccuracy",
120 "MulticlassAccuracy",
121 "MultilabelAccuracy",
122 "AUC",
123 "AUROC",
124 "BinaryAUROC",
125 "MulticlassAUROC",
126 "MultilabelAUROC",
127 "AveragePrecision",
128 "BinaryAveragePrecision",
129 "MulticlassAveragePrecision",
130 "MultilabelAveragePrecision",
131 "BinnedAveragePrecision",
132 "BinnedPrecisionRecallCurve",
133 "BinnedRecallAtFixedPrecision",
134 "BinaryCalibrationError",
135 "CalibrationError",
136 "MulticlassCalibrationError",
137 "BinaryCohenKappa",
138 "CohenKappa",
139 "MulticlassCohenKappa",
140 "Dice",
141 "ExactMatch",
142 "MulticlassExactMatch",
143 "MultilabelExactMatch",
144 "BinaryF1Score",
145 "BinaryFBetaScore",
146 "F1Score",
147 "FBetaScore",
148 "MulticlassF1Score",
149 "MulticlassFBetaScore",
150 "MultilabelF1Score",
151 "MultilabelFBetaScore",
152 "BinaryHammingDistance",
153 "HammingDistance",
154 "MulticlassHammingDistance",
155 "MultilabelHammingDistance",
156 "BinaryHingeLoss",
157 "HingeLoss",
158 "MulticlassHingeLoss",
159 "BinaryJaccardIndex",
160 "JaccardIndex",
161 "MulticlassJaccardIndex",
162 "MultilabelJaccardIndex",
163 "BinaryMatthewsCorrCoef",
164 "MatthewsCorrCoef",
165 "MulticlassMatthewsCorrCoef",
166 "MultilabelMatthewsCorrCoef",
167 "BinaryPrecision",
168 "BinaryRecall",
169 "MulticlassPrecision",
170 "MulticlassRecall",
171 "MultilabelPrecision",
172 "MultilabelRecall",
173 "Precision",
174 "Recall",
175 "CoverageError",
176 "LabelRankingAveragePrecision",
177 "LabelRankingLoss",
178 "MultilabelCoverageError",
179 "MultilabelRankingAveragePrecision",
180 "MultilabelRankingLoss",
181 "BinaryRecallAtFixedPrecision",
182 "MulticlassRecallAtFixedPrecision",
183 "MultilabelRecallAtFixedPrecision",
184 "ROC",
185 "BinaryROC",
186 "MulticlassROC",
187 "MultilabelROC",
188 "BinarySpecificity",
189 "MulticlassSpecificity",
190 "MultilabelSpecificity",
191 "Specificity",
192 ]
193
[end of src/torchmetrics/classification/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/torchmetrics/classification/__init__.py b/src/torchmetrics/classification/__init__.py
--- a/src/torchmetrics/classification/__init__.py
+++ b/src/torchmetrics/classification/__init__.py
@@ -119,7 +119,6 @@
"BinaryAccuracy",
"MulticlassAccuracy",
"MultilabelAccuracy",
- "AUC",
"AUROC",
"BinaryAUROC",
"MulticlassAUROC",
|
{"golden_diff": "diff --git a/src/torchmetrics/classification/__init__.py b/src/torchmetrics/classification/__init__.py\n--- a/src/torchmetrics/classification/__init__.py\n+++ b/src/torchmetrics/classification/__init__.py\n@@ -119,7 +119,6 @@\n \"BinaryAccuracy\",\n \"MulticlassAccuracy\",\n \"MultilabelAccuracy\",\n- \"AUC\",\n \"AUROC\",\n \"BinaryAUROC\",\n \"MulticlassAUROC\",\n", "issue": "module 'torchmetrics.classification' has no attribute 'AUC'\n## \ud83d\udc1b Bug\r\n\r\nImporting all the classification metrics causes the `AttributeError`:\r\n`from torchmetrics.classification import *`\r\n\r\n`AttributeError: module 'torchmetrics.classification' has no attribute 'AUC'`\r\n\r\nEnvironment\r\ntorchmetrics 0.11.0\r\npytorch 1.13.0\r\n\r\nIn order to fix it someone should remove AUC from the list __all__ (src/torchmetrics/classification/__init__.py)\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom torchmetrics.classification.confusion_matrix import ( # isort:skip\n BinaryConfusionMatrix,\n ConfusionMatrix,\n MulticlassConfusionMatrix,\n MultilabelConfusionMatrix,\n)\nfrom torchmetrics.classification.precision_recall_curve import ( # isort:skip\n PrecisionRecallCurve,\n BinaryPrecisionRecallCurve,\n MulticlassPrecisionRecallCurve,\n MultilabelPrecisionRecallCurve,\n)\nfrom torchmetrics.classification.stat_scores import ( # isort:skip\n BinaryStatScores,\n MulticlassStatScores,\n MultilabelStatScores,\n StatScores,\n)\nfrom torchmetrics.classification.accuracy import Accuracy, BinaryAccuracy, MulticlassAccuracy, MultilabelAccuracy\nfrom torchmetrics.classification.auroc import AUROC, BinaryAUROC, MulticlassAUROC, MultilabelAUROC\nfrom torchmetrics.classification.average_precision import (\n AveragePrecision,\n BinaryAveragePrecision,\n MulticlassAveragePrecision,\n MultilabelAveragePrecision,\n)\nfrom torchmetrics.classification.calibration_error import (\n BinaryCalibrationError,\n CalibrationError,\n MulticlassCalibrationError,\n)\nfrom torchmetrics.classification.cohen_kappa import BinaryCohenKappa, CohenKappa, MulticlassCohenKappa\nfrom torchmetrics.classification.dice import Dice\nfrom torchmetrics.classification.exact_match import ExactMatch, MulticlassExactMatch, MultilabelExactMatch\nfrom torchmetrics.classification.f_beta import (\n BinaryF1Score,\n BinaryFBetaScore,\n F1Score,\n FBetaScore,\n MulticlassF1Score,\n MulticlassFBetaScore,\n MultilabelF1Score,\n MultilabelFBetaScore,\n)\nfrom torchmetrics.classification.hamming import (\n BinaryHammingDistance,\n HammingDistance,\n MulticlassHammingDistance,\n MultilabelHammingDistance,\n)\nfrom torchmetrics.classification.hinge import BinaryHingeLoss, HingeLoss, MulticlassHingeLoss\nfrom torchmetrics.classification.jaccard import (\n BinaryJaccardIndex,\n JaccardIndex,\n MulticlassJaccardIndex,\n MultilabelJaccardIndex,\n)\nfrom torchmetrics.classification.matthews_corrcoef import (\n BinaryMatthewsCorrCoef,\n MatthewsCorrCoef,\n 
MulticlassMatthewsCorrCoef,\n MultilabelMatthewsCorrCoef,\n)\nfrom torchmetrics.classification.precision_recall import (\n BinaryPrecision,\n BinaryRecall,\n MulticlassPrecision,\n MulticlassRecall,\n MultilabelPrecision,\n MultilabelRecall,\n Precision,\n Recall,\n)\nfrom torchmetrics.classification.ranking import (\n MultilabelCoverageError,\n MultilabelRankingAveragePrecision,\n MultilabelRankingLoss,\n)\nfrom torchmetrics.classification.recall_at_fixed_precision import (\n BinaryRecallAtFixedPrecision,\n MulticlassRecallAtFixedPrecision,\n MultilabelRecallAtFixedPrecision,\n)\nfrom torchmetrics.classification.roc import ROC, BinaryROC, MulticlassROC, MultilabelROC\nfrom torchmetrics.classification.specificity import (\n BinarySpecificity,\n MulticlassSpecificity,\n MultilabelSpecificity,\n Specificity,\n)\n\n__all__ = [\n \"BinaryConfusionMatrix\",\n \"ConfusionMatrix\",\n \"MulticlassConfusionMatrix\",\n \"MultilabelConfusionMatrix\",\n \"PrecisionRecallCurve\",\n \"BinaryPrecisionRecallCurve\",\n \"MulticlassPrecisionRecallCurve\",\n \"MultilabelPrecisionRecallCurve\",\n \"BinaryStatScores\",\n \"MulticlassStatScores\",\n \"MultilabelStatScores\",\n \"StatScores\",\n \"Accuracy\",\n \"BinaryAccuracy\",\n \"MulticlassAccuracy\",\n \"MultilabelAccuracy\",\n \"AUC\",\n \"AUROC\",\n \"BinaryAUROC\",\n \"MulticlassAUROC\",\n \"MultilabelAUROC\",\n \"AveragePrecision\",\n \"BinaryAveragePrecision\",\n \"MulticlassAveragePrecision\",\n \"MultilabelAveragePrecision\",\n \"BinnedAveragePrecision\",\n \"BinnedPrecisionRecallCurve\",\n \"BinnedRecallAtFixedPrecision\",\n \"BinaryCalibrationError\",\n \"CalibrationError\",\n \"MulticlassCalibrationError\",\n \"BinaryCohenKappa\",\n \"CohenKappa\",\n \"MulticlassCohenKappa\",\n \"Dice\",\n \"ExactMatch\",\n \"MulticlassExactMatch\",\n \"MultilabelExactMatch\",\n \"BinaryF1Score\",\n \"BinaryFBetaScore\",\n \"F1Score\",\n \"FBetaScore\",\n \"MulticlassF1Score\",\n \"MulticlassFBetaScore\",\n \"MultilabelF1Score\",\n \"MultilabelFBetaScore\",\n \"BinaryHammingDistance\",\n \"HammingDistance\",\n \"MulticlassHammingDistance\",\n \"MultilabelHammingDistance\",\n \"BinaryHingeLoss\",\n \"HingeLoss\",\n \"MulticlassHingeLoss\",\n \"BinaryJaccardIndex\",\n \"JaccardIndex\",\n \"MulticlassJaccardIndex\",\n \"MultilabelJaccardIndex\",\n \"BinaryMatthewsCorrCoef\",\n \"MatthewsCorrCoef\",\n \"MulticlassMatthewsCorrCoef\",\n \"MultilabelMatthewsCorrCoef\",\n \"BinaryPrecision\",\n \"BinaryRecall\",\n \"MulticlassPrecision\",\n \"MulticlassRecall\",\n \"MultilabelPrecision\",\n \"MultilabelRecall\",\n \"Precision\",\n \"Recall\",\n \"CoverageError\",\n \"LabelRankingAveragePrecision\",\n \"LabelRankingLoss\",\n \"MultilabelCoverageError\",\n \"MultilabelRankingAveragePrecision\",\n \"MultilabelRankingLoss\",\n \"BinaryRecallAtFixedPrecision\",\n \"MulticlassRecallAtFixedPrecision\",\n \"MultilabelRecallAtFixedPrecision\",\n \"ROC\",\n \"BinaryROC\",\n \"MulticlassROC\",\n \"MultilabelROC\",\n \"BinarySpecificity\",\n \"MulticlassSpecificity\",\n \"MultilabelSpecificity\",\n \"Specificity\",\n]\n", "path": "src/torchmetrics/classification/__init__.py"}]}
| 2,578 | 108 |
gh_patches_debug_10932
|
rasdani/github-patches
|
git_diff
|
mozilla__pontoon-2370
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Set direction on placeables
*This issue was created automatically by a [script](https://github.com/mathjazz/bugzilla2github/).*
## [Bug 1692620](https://bugzilla.mozilla.org/show_bug.cgi?id=1692620)
Bug Reporter: @mathjazz
Created attachment 9202997
Screenshot
Everywhere we mark up placeables (i.e. everywhere we show strings in the translate view except in the textarea), we need to set the `dir` attribute to `ltr` in order to prevent bad display of placeables in RTL strings (as seen on the screenshot).
Pontoon picks the wrong folder format for locales (underscore instead of hyphens)
*This issue was created automatically by a [script](https://github.com/mathjazz/bugzilla2github/).*
## [Bug 1698636](https://bugzilla.mozilla.org/show_bug.cgi?id=1698636)
Bug Reporter: @flodolo
CC: @mathjazz
I created the repository with the source in `en-US` folder.
https://github.com/mozilla-l10n/mozilla-vpn-website-l10n/
Pontoon then synced `pt-BR` as `pt_BR`. One possible explanation is the presence of a file with underscores (`CODE_OF_CONDUCT.md`) in the repository. If that's the case, it's a bug, because we really care about folders, not files.
I then renamed `pt_BR` to use hyphens, synced translations for `en-CA` and it still used the wrong format (`en_CA`). Fixed this one too, synced a string for `en-GB`, and again wrong format.
At this point, I don't know if it will keep using _ for the next locales.
</issue>
<code>
[start of pontoon/sync/utils.py]
1 import errno
2 import os
3
4 from pontoon.base.models import Resource
5 from pontoon.base.utils import extension_in, first
6
7
8 def is_hidden(path):
9 """
10 Return true if path contains hidden directory.
11 """
12 for p in path.split(os.sep):
13 if p.startswith("."):
14 return True
15 return False
16
17
18 def is_resource(filename):
19 """
20 Return True if the filename's extension is a supported Resource
21 format.
22 """
23 return extension_in(filename, Resource.ALLOWED_EXTENSIONS)
24
25
26 def is_source_resource(filename):
27 """
28 Return True if the filename's extension is a source-only Resource
29 format.
30 """
31 return extension_in(filename, Resource.SOURCE_EXTENSIONS)
32
33
34 def is_asymmetric_resource(filename):
35 """
36 Return True if the filename's extension is an asymmetric Resource
37 format.
38 """
39 return extension_in(filename, Resource.ASYMMETRIC_FORMATS)
40
41
42 def get_parent_directory(path):
43 """
44 Get parent directory of the path
45 """
46 return os.path.abspath(os.path.join(path, os.pardir))
47
48
49 def uses_undercore_as_separator(directory):
50 """
51 Return True if any subdirectory contains underscore.
52 """
53 subdirs = os.listdir(directory)
54 return "".join(subdirs).count("_") > "".join(subdirs).count("-")
55
56
57 def directory_contains_resources(directory_path, source_only=False):
58 """
59 Return True if the given directory contains at least one
60 supported resource file (checked via file extension), or False
61 otherwise.
62
63 :param source_only:
64 If True, only check for source-only formats.
65 """
66 resource_check = is_source_resource if source_only else is_resource
67 for root, dirnames, filenames in os.walk(directory_path):
68 # first() avoids checking past the first matching resource.
69 if first(filenames, resource_check) is not None:
70 return True
71 return False
72
73
74 def locale_directory_path(checkout_path, locale_code, parent_directories=None):
75 """
76 Path to the directory where strings for the given locale are
77 stored.
78 """
79 possible_paths = []
80
81 # Check paths that use underscore as locale/country code separator
82 locale_code_variants = [locale_code, locale_code.replace("-", "_")]
83
84 # Optimization for directories with a lot of paths: if parent_directories
85 # is provided, we simply join it with locale_code and check if path exists
86 for parent_directory in parent_directories:
87 for locale in locale_code_variants:
88 candidate = os.path.join(parent_directory, locale)
89 if os.path.exists(candidate):
90 possible_paths.append(candidate)
91
92 if not possible_paths:
93 for root, dirnames, filenames in os.walk(checkout_path):
94 for locale in locale_code_variants:
95 if locale in dirnames:
96 possible_paths.append(os.path.join(root, locale))
97
98 for possible_path in possible_paths:
99 if directory_contains_resources(possible_path):
100 return possible_path
101
102 # If locale directory empty (asymmetric formats)
103 if possible_paths:
104 return possible_paths[0]
105
106 raise OSError("Directory for locale `{}` not found".format(locale_code or "source"))
107
108
109 def locale_to_source_path(path):
110 """
111 Return source resource path for the given locale resource path.
112 Source files for .po files are actually .pot.
113 """
114 if path.endswith("po"):
115 path += "t"
116 return path
117
118
119 def source_to_locale_path(path):
120 """
121 Return locale resource path for the given source resource path.
122 Locale files for .pot files are actually .po.
123 """
124 if path.endswith("pot"):
125 path = path[:-1]
126 return path
127
128
129 def escape_quotes(value):
130 """
131 DTD files can use single or double quotes for identifying strings,
132     so &quot; and &apos; are the safe bet that will work in both cases.
133 """
134     value = value.replace('"', "\\&quot;")
135     value = value.replace("'", "\\&apos;")
136
137 return value
138
139
140 def unescape_quotes(value):
141     value = value.replace("\\&quot;", '"')
142 value = value.replace("\\u0022", '"') # Bug 1390111
143 value = value.replace('\\"', '"')
144
145     value = value.replace("\\&apos;", "'")
146 value = value.replace("\\u0027", "'") # Bug 1390111
147 value = value.replace("\\'", "'")
148
149 return value
150
151
152 def create_parent_directory(path):
153 """
154 Create parent directory of the given path if it doesn't exist yet.
155 """
156 try:
157 os.makedirs(os.path.dirname(path))
158 except OSError as e:
159 # Directory already exists
160 if e.errno == errno.EEXIST:
161 pass
162 else:
163 raise
164
[end of pontoon/sync/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pontoon/sync/utils.py b/pontoon/sync/utils.py
--- a/pontoon/sync/utils.py
+++ b/pontoon/sync/utils.py
@@ -48,10 +48,16 @@
def uses_undercore_as_separator(directory):
"""
- Return True if any subdirectory contains underscore.
+ Return True if the names of folders in a directory contain more '_' than '-'.
"""
+ only_folders = []
subdirs = os.listdir(directory)
- return "".join(subdirs).count("_") > "".join(subdirs).count("-")
+
+ for i in subdirs:
+ if os.path.isdir(os.path.join(directory, i)):
+ only_folders.append(i)
+
+ return "".join(only_folders).count("_") > "".join(only_folders).count("-")
def directory_contains_resources(directory_path, source_only=False):
|
{"golden_diff": "diff --git a/pontoon/sync/utils.py b/pontoon/sync/utils.py\n--- a/pontoon/sync/utils.py\n+++ b/pontoon/sync/utils.py\n@@ -48,10 +48,16 @@\n \n def uses_undercore_as_separator(directory):\n \"\"\"\n- Return True if any subdirectory contains underscore.\n+ Return True if the names of folders in a directory contain more '_' than '-'.\n \"\"\"\n+ only_folders = []\n subdirs = os.listdir(directory)\n- return \"\".join(subdirs).count(\"_\") > \"\".join(subdirs).count(\"-\")\n+\n+ for i in subdirs:\n+ if os.path.isdir(os.path.join(directory, i)):\n+ only_folders.append(i)\n+\n+ return \"\".join(only_folders).count(\"_\") > \"\".join(only_folders).count(\"-\")\n \n \n def directory_contains_resources(directory_path, source_only=False):\n", "issue": "Set direction on placeables\n*This issue was created automatically by a [script](https://github.com/mathjazz/bugzilla2github/).*\r\n## [Bug 1692620](https://bugzilla.mozilla.org/show_bug.cgi?id=1692620)\r\nBug Reporter: @mathjazz\r\n\r\nCreated attachment 9202997\r\nScreenshot\r\n\r\nEverywhere we mark up placeables (i.e. everywhere we show strings in the translate view except in the textarea), we need to set the `dir` attribute to `ltr` in order to prevent bad display of placeables in RTL strings (as seen on the screenshot).\nPontoon picks the wrong folder format for locales (underscore instead of hyphens)\n*This issue was created automatically by a [script](https://github.com/mathjazz/bugzilla2github/).*\n## [Bug 1698636](https://bugzilla.mozilla.org/show_bug.cgi?id=1698636)\nBug Reporter: @flodolo\nCC: @mathjazz\n\nI created the repository with the source in `en-US` folder.\nhttps://github.com/mozilla-l10n/mozilla-vpn-website-l10n/\n\nPontoon then synced `pt-BR` as `pt_BR`. One possible explanation is the presence of a file with underscores (`CODE_OF_CONDUCT.md`) in the repository. If that's the case, it's a bug, because we really care about folders, not files.\n\nI then renamed `pt_BR` to use hyphens, synced translations for `en-CA` and it still used the wrong format (`en_CA`). 
Fixed this one too, synced a string for `en-GB`, and again wrong format.\n\nAt this point, I don't know if it will keep using _ for the next locales.\n", "before_files": [{"content": "import errno\nimport os\n\nfrom pontoon.base.models import Resource\nfrom pontoon.base.utils import extension_in, first\n\n\ndef is_hidden(path):\n \"\"\"\n Return true if path contains hidden directory.\n \"\"\"\n for p in path.split(os.sep):\n if p.startswith(\".\"):\n return True\n return False\n\n\ndef is_resource(filename):\n \"\"\"\n Return True if the filename's extension is a supported Resource\n format.\n \"\"\"\n return extension_in(filename, Resource.ALLOWED_EXTENSIONS)\n\n\ndef is_source_resource(filename):\n \"\"\"\n Return True if the filename's extension is a source-only Resource\n format.\n \"\"\"\n return extension_in(filename, Resource.SOURCE_EXTENSIONS)\n\n\ndef is_asymmetric_resource(filename):\n \"\"\"\n Return True if the filename's extension is an asymmetric Resource\n format.\n \"\"\"\n return extension_in(filename, Resource.ASYMMETRIC_FORMATS)\n\n\ndef get_parent_directory(path):\n \"\"\"\n Get parent directory of the path\n \"\"\"\n return os.path.abspath(os.path.join(path, os.pardir))\n\n\ndef uses_undercore_as_separator(directory):\n \"\"\"\n Return True if any subdirectory contains underscore.\n \"\"\"\n subdirs = os.listdir(directory)\n return \"\".join(subdirs).count(\"_\") > \"\".join(subdirs).count(\"-\")\n\n\ndef directory_contains_resources(directory_path, source_only=False):\n \"\"\"\n Return True if the given directory contains at least one\n supported resource file (checked via file extension), or False\n otherwise.\n\n :param source_only:\n If True, only check for source-only formats.\n \"\"\"\n resource_check = is_source_resource if source_only else is_resource\n for root, dirnames, filenames in os.walk(directory_path):\n # first() avoids checking past the first matching resource.\n if first(filenames, resource_check) is not None:\n return True\n return False\n\n\ndef locale_directory_path(checkout_path, locale_code, parent_directories=None):\n \"\"\"\n Path to the directory where strings for the given locale are\n stored.\n \"\"\"\n possible_paths = []\n\n # Check paths that use underscore as locale/country code separator\n locale_code_variants = [locale_code, locale_code.replace(\"-\", \"_\")]\n\n # Optimization for directories with a lot of paths: if parent_directories\n # is provided, we simply join it with locale_code and check if path exists\n for parent_directory in parent_directories:\n for locale in locale_code_variants:\n candidate = os.path.join(parent_directory, locale)\n if os.path.exists(candidate):\n possible_paths.append(candidate)\n\n if not possible_paths:\n for root, dirnames, filenames in os.walk(checkout_path):\n for locale in locale_code_variants:\n if locale in dirnames:\n possible_paths.append(os.path.join(root, locale))\n\n for possible_path in possible_paths:\n if directory_contains_resources(possible_path):\n return possible_path\n\n # If locale directory empty (asymmetric formats)\n if possible_paths:\n return possible_paths[0]\n\n raise OSError(\"Directory for locale `{}` not found\".format(locale_code or \"source\"))\n\n\ndef locale_to_source_path(path):\n \"\"\"\n Return source resource path for the given locale resource path.\n Source files for .po files are actually .pot.\n \"\"\"\n if path.endswith(\"po\"):\n path += \"t\"\n return path\n\n\ndef source_to_locale_path(path):\n \"\"\"\n Return locale resource path for the given source 
resource path.\n Locale files for .pot files are actually .po.\n \"\"\"\n if path.endswith(\"pot\"):\n path = path[:-1]\n return path\n\n\ndef escape_quotes(value):\n \"\"\"\n DTD files can use single or double quotes for identifying strings,\n so " and ' are the safe bet that will work in both cases.\n \"\"\"\n value = value.replace('\"', \"\\\\"\")\n value = value.replace(\"'\", \"\\\\'\")\n\n return value\n\n\ndef unescape_quotes(value):\n value = value.replace(\"\\\\"\", '\"')\n value = value.replace(\"\\\\u0022\", '\"') # Bug 1390111\n value = value.replace('\\\\\"', '\"')\n\n value = value.replace(\"\\\\'\", \"'\")\n value = value.replace(\"\\\\u0027\", \"'\") # Bug 1390111\n value = value.replace(\"\\\\'\", \"'\")\n\n return value\n\n\ndef create_parent_directory(path):\n \"\"\"\n Create parent directory of the given path if it doesn't exist yet.\n \"\"\"\n try:\n os.makedirs(os.path.dirname(path))\n except OSError as e:\n # Directory already exists\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n", "path": "pontoon/sync/utils.py"}]}
| 2,339 | 194 |
gh_patches_debug_23170
|
rasdani/github-patches
|
git_diff
|
dask__dask-1792
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
read_parquet not passing through categories
Although fastparquet correctly loads a categorical, and it shows up in the underlying pandas dataframe, this information does not appear in the dask dataframe.
Failing case:
```
def test_cats_in_parquet():
s = pd.Series([b'a', b'b', b'c']*20, dtype='category')
d = pd.DataFrame({'b': s})
df = dd.from_pandas(d, npartitions=1)
to_parquet('temp.parq', df, write_index=True)
df2 = read_parquet('temp.parq', index='index', categories=['b'])
    assert df2.compute().b.cat.categories == df.b.cat.categories
assert df2.b.cat.categories == df.b.cat.categories
```
</issue>
<code>
[start of dask/dataframe/io/parquet.py]
1 import struct
2
3 from toolz import first, partial
4
5 from ..core import DataFrame, Series
6 from ..utils import make_meta
7 from ...base import compute, tokenize, normalize_token
8 from ...delayed import delayed
9 from ...bytes.core import OpenFileCreator
10
11 try:
12 import fastparquet
13 from fastparquet import parquet_thrift
14 from fastparquet.core import read_row_group_file
15 default_encoding = parquet_thrift.Encoding.PLAIN
16 except:
17 fastparquet = False
18 default_encoding = None
19
20
21 def read_parquet(path, columns=None, filters=None, categories=None, index=None):
22 """
23 Read Dask DataFrame from ParquetFile
24
25 This reads a directory of Parquet data into a Dask.dataframe, one file per
26 partition. It selects the index among the sorted columns if any exist.
27
28 Parameters
29 ----------
30 path : string
31 Source directory for data.
32 Prepend with protocol like ``s3://`` or ``hdfs://`` for remote data.
33 columns: list or None
34 List of column names to load
35 filters: list
36 List of filters to apply, like ``[('x', '>' 0), ...]``
37 index: string or None
38 Name of index column to use if that column is sorted
39 categories: list or None
40 For any fields listed here, if the parquet encoding is Dictionary,
41 the column will be created with dtype category. Use only if it is
42 guaranteed that the column is encoded as dictionary in all row-groups.
43
44 Examples
45 --------
46 >>> df = read_parquet('s3://bucket/my-parquet-data') # doctest: +SKIP
47
48 See Also
49 --------
50 to_parquet
51 """
52 if fastparquet is False:
53 raise ImportError("fastparquet not installed")
54 if filters is None:
55 filters = []
56 myopen = OpenFileCreator(path, compression=None, text=False)
57
58 if isinstance(columns, list):
59 columns = tuple(columns)
60
61 try:
62 pf = fastparquet.ParquetFile(path + myopen.fs.sep + '_metadata',
63 open_with=myopen,
64 sep=myopen.fs.sep)
65 except:
66 pf = fastparquet.ParquetFile(path, open_with=myopen, sep=myopen.fs.sep)
67
68 name = 'read-parquet-' + tokenize(pf, columns, categories)
69
70 rgs = [rg for rg in pf.row_groups if
71 not(fastparquet.api.filter_out_stats(rg, filters, pf.helper)) and
72 not(fastparquet.api.filter_out_cats(rg, filters))]
73
74 # Find an index among the partially sorted columns
75 minmax = fastparquet.api.sorted_partitioned_columns(pf)
76
77 if index is False:
78 index_col = None
79 elif len(minmax) > 1:
80 if index:
81 index_col = index
82 else:
83 raise ValueError("Multiple possible indexes exist: %s. "
84 "Please select one with index='index-name'"
85 % sorted(minmax))
86 elif len(minmax) == 1:
87 index_col = first(minmax)
88 else:
89 index_col = None
90
91 if columns is None:
92 all_columns = tuple(pf.columns + list(pf.cats))
93 else:
94 all_columns = columns
95 if not isinstance(all_columns, tuple):
96 out_type = Series
97 all_columns = (all_columns,)
98 else:
99 out_type = DataFrame
100 if index_col and index_col not in all_columns:
101 all_columns = all_columns + (index_col,)
102
103 # TODO: if categories vary from one rg to next, need to cope
104 dtypes = {k: ('category' if k in (categories or []) else v) for k, v in
105 pf.dtypes.items() if k in all_columns}
106
107 meta = make_meta(dtypes)
108 if index_col:
109 meta = meta.set_index(index_col)
110
111 if out_type == Series:
112 assert len(meta.columns) == 1
113 meta = meta[meta.columns[0]]
114
115 dsk = {(name, i): (read_parquet_row_group, myopen, pf.row_group_filename(rg),
116 index_col, all_columns, rg, out_type == Series,
117 categories, pf.helper, pf.cats)
118 for i, rg in enumerate(rgs)}
119
120 if index_col:
121 divisions = list(minmax[index_col]['min']) + [minmax[index_col]['max'][-1]]
122 else:
123 divisions = (None,) * (len(rgs) + 1)
124
125 return out_type(dsk, name, meta, divisions)
126
127
128 def read_parquet_row_group(open, fn, index, columns, rg, series, *args):
129 if not isinstance(columns, (tuple, list)):
130 columns = (columns,)
131 series = True
132 if index and index not in columns:
133 columns = columns + type(columns)([index])
134 df = read_row_group_file(fn, rg, columns, *args, open=open)
135 if index:
136 df = df.set_index(index)
137
138 if series:
139 return df[df.columns[0]]
140 else:
141 return df
142
143
144 def to_parquet(path, df, encoding=default_encoding, compression=None,
145 write_index=None):
146 """
147 Write Dask.dataframe to parquet
148
149 Notes
150 -----
151 Each partition will be written to a separate file.
152
153 Parameters
154 ----------
155 path : string
156 Destination directory for data. Prepend with protocol like ``s3://``
157 or ``hdfs://`` for remote data.
158 df : Dask.dataframe
159 encoding : parquet_thrift.Encoding
160 compression : string or dict
161 Either a string like "SNAPPY" or a dictionary mapping column names to
162 compressors like ``{"name": "GZIP", "values": "SNAPPY"}``
163 write_index : boolean
164 Whether or not to write the index. Defaults to True *if* divisions are
165 known.
166
167 Examples
168 --------
169 >>> df = dd.read_csv(...) # doctest: +SKIP
170 >>> to_parquet('/path/to/output/', df, compression='SNAPPY') # doctest: +SKIP
171
172 See Also
173 --------
174 read_parquet: Read parquet data to dask.dataframe
175 """
176 if fastparquet is False:
177 raise ImportError("fastparquet not installed")
178
179 myopen = OpenFileCreator(path, compression=None, text=False)
180 myopen.fs.mkdirs(path)
181 sep = myopen.fs.sep
182 metadata_fn = sep.join([path, '_metadata'])
183
184 if write_index is True or write_index is None and df.known_divisions:
185 df = df.reset_index()
186
187 fmd = fastparquet.writer.make_metadata(df._meta_nonempty)
188
189 partitions = df.to_delayed()
190 filenames = ['part.%i.parquet' % i for i in range(len(partitions))]
191 outfiles = [sep.join([path, fn]) for fn in filenames]
192
193 writes = [delayed(fastparquet.writer.make_part_file)(
194 myopen(outfile, 'wb'), partition, fmd.schema,
195 compression=compression)
196 for outfile, partition in zip(outfiles, partitions)]
197
198 out = compute(*writes)
199
200 for fn, rg in zip(filenames, out):
201 for chunk in rg.columns:
202 chunk.file_path = fn
203 fmd.row_groups.append(rg)
204
205 with myopen(metadata_fn, mode='wb') as f:
206 f.write(b'PAR1')
207 foot_size = fastparquet.writer.write_thrift(f, fmd)
208 f.write(struct.pack(b"<i", foot_size))
209 f.write(b'PAR1')
210 f.close()
211
212 with myopen(sep.join([path, '_common_metadata']), mode='wb') as f:
213 fastparquet.writer.write_common_metadata(f, fmd)
214
215
216 if fastparquet:
217 @partial(normalize_token.register, fastparquet.ParquetFile)
218 def normalize_ParquetFile(pf):
219 return (type(pf), pf.fn, pf.sep) + normalize_token(pf.open)
220
[end of dask/dataframe/io/parquet.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dask/dataframe/io/parquet.py b/dask/dataframe/io/parquet.py
--- a/dask/dataframe/io/parquet.py
+++ b/dask/dataframe/io/parquet.py
@@ -1,5 +1,6 @@
import struct
+import pandas as pd
from toolz import first, partial
from ..core import DataFrame, Series
@@ -71,6 +72,11 @@
not(fastparquet.api.filter_out_stats(rg, filters, pf.helper)) and
not(fastparquet.api.filter_out_cats(rg, filters))]
+ # get category values from first row-group
+ categories = categories or []
+ cats = pf.grab_cats(categories)
+ categories = [cat for cat in categories if cats.get(cat, None) is not None]
+
# Find an index among the partially sorted columns
minmax = fastparquet.api.sorted_partitioned_columns(pf)
@@ -105,6 +111,9 @@
pf.dtypes.items() if k in all_columns}
meta = make_meta(dtypes)
+ for cat in categories:
+ meta[cat] = pd.Series(pd.Categorical([], categories=cats[cat]))
+
if index_col:
meta = meta.set_index(index_col)
|
{"golden_diff": "diff --git a/dask/dataframe/io/parquet.py b/dask/dataframe/io/parquet.py\n--- a/dask/dataframe/io/parquet.py\n+++ b/dask/dataframe/io/parquet.py\n@@ -1,5 +1,6 @@\n import struct\n \n+import pandas as pd\n from toolz import first, partial\n \n from ..core import DataFrame, Series\n@@ -71,6 +72,11 @@\n not(fastparquet.api.filter_out_stats(rg, filters, pf.helper)) and\n not(fastparquet.api.filter_out_cats(rg, filters))]\n \n+ # get category values from first row-group\n+ categories = categories or []\n+ cats = pf.grab_cats(categories)\n+ categories = [cat for cat in categories if cats.get(cat, None) is not None]\n+\n # Find an index among the partially sorted columns\n minmax = fastparquet.api.sorted_partitioned_columns(pf)\n \n@@ -105,6 +111,9 @@\n pf.dtypes.items() if k in all_columns}\n \n meta = make_meta(dtypes)\n+ for cat in categories:\n+ meta[cat] = pd.Series(pd.Categorical([], categories=cats[cat]))\n+\n if index_col:\n meta = meta.set_index(index_col)\n", "issue": "read_parquet not passing through categories\nAlthough fastparquet correctly loads a categorical, and it shows up in the underlying pandas dataframe, this information does not appear in the dask dataframe.\r\n\r\nFailing case:\r\n```\r\ndef test_cats_in_parquet():\r\n s = pd.Series([b'a', b'b', b'c']*20, dtype='category')\r\n d = pd.DataFrame({'b': s})\r\n df = dd.from_pandas(d, npartitions=1)\r\n to_parquet('temp.parq', df, write_index=True)\r\n df2 = read_parquet('temp.parq', index='index', categories=['b'])\r\n assert df2.compute().b.cat,categories == df.b.cat.categories\r\n assert df2.b.cat.categories == df.b.cat.categories\r\n```\r\n\n", "before_files": [{"content": "import struct\n\nfrom toolz import first, partial\n\nfrom ..core import DataFrame, Series\nfrom ..utils import make_meta\nfrom ...base import compute, tokenize, normalize_token\nfrom ...delayed import delayed\nfrom ...bytes.core import OpenFileCreator\n\ntry:\n import fastparquet\n from fastparquet import parquet_thrift\n from fastparquet.core import read_row_group_file\n default_encoding = parquet_thrift.Encoding.PLAIN\nexcept:\n fastparquet = False\n default_encoding = None\n\n\ndef read_parquet(path, columns=None, filters=None, categories=None, index=None):\n \"\"\"\n Read Dask DataFrame from ParquetFile\n\n This reads a directory of Parquet data into a Dask.dataframe, one file per\n partition. It selects the index among the sorted columns if any exist.\n\n Parameters\n ----------\n path : string\n Source directory for data.\n Prepend with protocol like ``s3://`` or ``hdfs://`` for remote data.\n columns: list or None\n List of column names to load\n filters: list\n List of filters to apply, like ``[('x', '>' 0), ...]``\n index: string or None\n Name of index column to use if that column is sorted\n categories: list or None\n For any fields listed here, if the parquet encoding is Dictionary,\n the column will be created with dtype category. 
Use only if it is\n guaranteed that the column is encoded as dictionary in all row-groups.\n\n Examples\n --------\n >>> df = read_parquet('s3://bucket/my-parquet-data') # doctest: +SKIP\n\n See Also\n --------\n to_parquet\n \"\"\"\n if fastparquet is False:\n raise ImportError(\"fastparquet not installed\")\n if filters is None:\n filters = []\n myopen = OpenFileCreator(path, compression=None, text=False)\n\n if isinstance(columns, list):\n columns = tuple(columns)\n\n try:\n pf = fastparquet.ParquetFile(path + myopen.fs.sep + '_metadata',\n open_with=myopen,\n sep=myopen.fs.sep)\n except:\n pf = fastparquet.ParquetFile(path, open_with=myopen, sep=myopen.fs.sep)\n\n name = 'read-parquet-' + tokenize(pf, columns, categories)\n\n rgs = [rg for rg in pf.row_groups if\n not(fastparquet.api.filter_out_stats(rg, filters, pf.helper)) and\n not(fastparquet.api.filter_out_cats(rg, filters))]\n\n # Find an index among the partially sorted columns\n minmax = fastparquet.api.sorted_partitioned_columns(pf)\n\n if index is False:\n index_col = None\n elif len(minmax) > 1:\n if index:\n index_col = index\n else:\n raise ValueError(\"Multiple possible indexes exist: %s. \"\n \"Please select one with index='index-name'\"\n % sorted(minmax))\n elif len(minmax) == 1:\n index_col = first(minmax)\n else:\n index_col = None\n\n if columns is None:\n all_columns = tuple(pf.columns + list(pf.cats))\n else:\n all_columns = columns\n if not isinstance(all_columns, tuple):\n out_type = Series\n all_columns = (all_columns,)\n else:\n out_type = DataFrame\n if index_col and index_col not in all_columns:\n all_columns = all_columns + (index_col,)\n\n # TODO: if categories vary from one rg to next, need to cope\n dtypes = {k: ('category' if k in (categories or []) else v) for k, v in\n pf.dtypes.items() if k in all_columns}\n\n meta = make_meta(dtypes)\n if index_col:\n meta = meta.set_index(index_col)\n\n if out_type == Series:\n assert len(meta.columns) == 1\n meta = meta[meta.columns[0]]\n\n dsk = {(name, i): (read_parquet_row_group, myopen, pf.row_group_filename(rg),\n index_col, all_columns, rg, out_type == Series,\n categories, pf.helper, pf.cats)\n for i, rg in enumerate(rgs)}\n\n if index_col:\n divisions = list(minmax[index_col]['min']) + [minmax[index_col]['max'][-1]]\n else:\n divisions = (None,) * (len(rgs) + 1)\n\n return out_type(dsk, name, meta, divisions)\n\n\ndef read_parquet_row_group(open, fn, index, columns, rg, series, *args):\n if not isinstance(columns, (tuple, list)):\n columns = (columns,)\n series = True\n if index and index not in columns:\n columns = columns + type(columns)([index])\n df = read_row_group_file(fn, rg, columns, *args, open=open)\n if index:\n df = df.set_index(index)\n\n if series:\n return df[df.columns[0]]\n else:\n return df\n\n\ndef to_parquet(path, df, encoding=default_encoding, compression=None,\n write_index=None):\n \"\"\"\n Write Dask.dataframe to parquet\n\n Notes\n -----\n Each partition will be written to a separate file.\n\n Parameters\n ----------\n path : string\n Destination directory for data. Prepend with protocol like ``s3://``\n or ``hdfs://`` for remote data.\n df : Dask.dataframe\n encoding : parquet_thrift.Encoding\n compression : string or dict\n Either a string like \"SNAPPY\" or a dictionary mapping column names to\n compressors like ``{\"name\": \"GZIP\", \"values\": \"SNAPPY\"}``\n write_index : boolean\n Whether or not to write the index. Defaults to True *if* divisions are\n known.\n\n Examples\n --------\n >>> df = dd.read_csv(...) 
# doctest: +SKIP\n >>> to_parquet('/path/to/output/', df, compression='SNAPPY') # doctest: +SKIP\n\n See Also\n --------\n read_parquet: Read parquet data to dask.dataframe\n \"\"\"\n if fastparquet is False:\n raise ImportError(\"fastparquet not installed\")\n\n myopen = OpenFileCreator(path, compression=None, text=False)\n myopen.fs.mkdirs(path)\n sep = myopen.fs.sep\n metadata_fn = sep.join([path, '_metadata'])\n\n if write_index is True or write_index is None and df.known_divisions:\n df = df.reset_index()\n\n fmd = fastparquet.writer.make_metadata(df._meta_nonempty)\n\n partitions = df.to_delayed()\n filenames = ['part.%i.parquet' % i for i in range(len(partitions))]\n outfiles = [sep.join([path, fn]) for fn in filenames]\n\n writes = [delayed(fastparquet.writer.make_part_file)(\n myopen(outfile, 'wb'), partition, fmd.schema,\n compression=compression)\n for outfile, partition in zip(outfiles, partitions)]\n\n out = compute(*writes)\n\n for fn, rg in zip(filenames, out):\n for chunk in rg.columns:\n chunk.file_path = fn\n fmd.row_groups.append(rg)\n\n with myopen(metadata_fn, mode='wb') as f:\n f.write(b'PAR1')\n foot_size = fastparquet.writer.write_thrift(f, fmd)\n f.write(struct.pack(b\"<i\", foot_size))\n f.write(b'PAR1')\n f.close()\n\n with myopen(sep.join([path, '_common_metadata']), mode='wb') as f:\n fastparquet.writer.write_common_metadata(f, fmd)\n\n\nif fastparquet:\n @partial(normalize_token.register, fastparquet.ParquetFile)\n def normalize_ParquetFile(pf):\n return (type(pf), pf.fn, pf.sep) + normalize_token(pf.open)\n", "path": "dask/dataframe/io/parquet.py"}]}
| 3,017 | 284 |
gh_patches_debug_15047
|
rasdani/github-patches
|
git_diff
|
freedomofpress__securedrop-2694
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tor2Web warning does not display well
# Bug
## Description
The <strong> tags are incorrectly set.
## Steps to Reproduce
firefox https://zdf4nikyuswdzbt6.onion.to/
## Expected Behavior
Spaces between words
## Actual Behavior
Missing spaces and weird bold / regular changes.

</issue>
<code>
[start of securedrop/source_app/__init__.py]
1 from datetime import datetime, timedelta
2 from flask import (Flask, render_template, flash, Markup, request, g, session,
3 url_for, redirect)
4 from flask_babel import gettext
5 from flask_assets import Environment
6 from flask_wtf.csrf import CSRFProtect, CSRFError
7 from jinja2 import evalcontextfilter
8 from os import path
9 from sqlalchemy.orm.exc import NoResultFound
10
11 import crypto_util
12 import i18n
13 import store
14 import template_filters
15 import version
16
17 from db import Source, db_session
18 from request_that_secures_file_uploads import RequestThatSecuresFileUploads
19 from source_app import main, info, api
20 from source_app.decorators import ignore_static
21 from source_app.utils import logged_in
22
23
24 def create_app(config):
25 app = Flask(__name__,
26 template_folder=config.SOURCE_TEMPLATES_DIR,
27 static_folder=path.join(config.SECUREDROP_ROOT, 'static'))
28 app.request_class = RequestThatSecuresFileUploads
29 app.config.from_object(config.SourceInterfaceFlaskConfig)
30
31 # The default CSRF token expiration is 1 hour. Since large uploads can
32 # take longer than an hour over Tor, we increase the valid window to 24h.
33 app.config['WTF_CSRF_TIME_LIMIT'] = 60 * 60 * 24
34
35 CSRFProtect(app)
36
37 @app.errorhandler(CSRFError)
38 def handle_csrf_error(e):
39 msg = render_template('session_timeout.html')
40 session.clear()
41 flash(Markup(msg), "important")
42 return redirect(url_for('main.index'))
43
44 assets = Environment(app)
45 app.config['assets'] = assets
46
47 i18n.setup_app(app)
48
49 app.jinja_env.trim_blocks = True
50 app.jinja_env.lstrip_blocks = True
51 app.jinja_env.globals['version'] = version.__version__
52 if getattr(config, 'CUSTOM_HEADER_IMAGE', None):
53 app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE
54 app.jinja_env.globals['use_custom_header_image'] = True
55 else:
56 app.jinja_env.globals['header_image'] = 'logo.png'
57 app.jinja_env.globals['use_custom_header_image'] = False
58
59 app.jinja_env.filters['rel_datetime_format'] = \
60 template_filters.rel_datetime_format
61 app.jinja_env.filters['nl2br'] = evalcontextfilter(template_filters.nl2br)
62 app.jinja_env.filters['filesizeformat'] = template_filters.filesizeformat
63
64 for module in [main, info, api]:
65 app.register_blueprint(module.make_blueprint(config))
66
67 @app.before_request
68 @ignore_static
69 def check_tor2web():
70 # ignore_static here so we only flash a single message warning
71 # about Tor2Web, corresponding to the initial page load.
72 if 'X-tor2web' in request.headers:
73 flash(Markup(gettext(
74 '<strong>WARNING:</strong> You appear to be using Tor2Web. '
75 'This <strong>does not</strong> provide anonymity. '
76 '<a href="{url}">Why is this dangerous?</a>')
77 .format(url=url_for('info.tor2web_warning'))),
78 "banner-warning")
79
80 @app.before_request
81 @ignore_static
82 def setup_g():
83 """Store commonly used values in Flask's special g object"""
84 g.locale = i18n.get_locale()
85 g.text_direction = i18n.get_text_direction(g.locale)
86 g.html_lang = i18n.locale_to_rfc_5646(g.locale)
87 g.locales = i18n.get_locale2name()
88
89 if 'expires' in session and datetime.utcnow() >= session['expires']:
90 msg = render_template('session_timeout.html')
91
92 # clear the session after we render the message so it's localized
93 session.clear()
94
95 flash(Markup(msg), "important")
96
97 session['expires'] = datetime.utcnow() + \
98 timedelta(minutes=getattr(config,
99 'SESSION_EXPIRATION_MINUTES',
100 120))
101
102 # ignore_static here because `crypto_util.hash_codename` is scrypt
103 # (very time consuming), and we don't need to waste time running if
104 # we're just serving a static resource that won't need to access
105 # these common values.
106 if logged_in():
107 g.codename = session['codename']
108 g.filesystem_id = crypto_util.hash_codename(g.codename)
109 try:
110 g.source = Source.query \
111 .filter(Source.filesystem_id == g.filesystem_id) \
112 .one()
113 except NoResultFound as e:
114 app.logger.error(
115 "Found no Sources when one was expected: %s" %
116 (e,))
117 del session['logged_in']
118 del session['codename']
119 return redirect(url_for('main.index'))
120 g.loc = store.path(g.filesystem_id)
121
122 @app.teardown_appcontext
123 def shutdown_session(exception=None):
124 """Automatically remove database sessions at the end of the request, or
125 when the application shuts down"""
126 db_session.remove()
127
128 @app.errorhandler(404)
129 def page_not_found(error):
130 return render_template('notfound.html'), 404
131
132 @app.errorhandler(500)
133 def internal_error(error):
134 return render_template('error.html'), 500
135
136 return app
137
[end of securedrop/source_app/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/securedrop/source_app/__init__.py b/securedrop/source_app/__init__.py
--- a/securedrop/source_app/__init__.py
+++ b/securedrop/source_app/__init__.py
@@ -71,8 +71,10 @@
# about Tor2Web, corresponding to the initial page load.
if 'X-tor2web' in request.headers:
flash(Markup(gettext(
- '<strong>WARNING:</strong> You appear to be using Tor2Web. '
- 'This <strong>does not</strong> provide anonymity. '
+ '<strong>WARNING: </strong> '
+ 'You appear to be using Tor2Web. '
+ 'This <strong> does not </strong> '
+ 'provide anonymity. '
'<a href="{url}">Why is this dangerous?</a>')
.format(url=url_for('info.tor2web_warning'))),
"banner-warning")
|
{"golden_diff": "diff --git a/securedrop/source_app/__init__.py b/securedrop/source_app/__init__.py\n--- a/securedrop/source_app/__init__.py\n+++ b/securedrop/source_app/__init__.py\n@@ -71,8 +71,10 @@\n # about Tor2Web, corresponding to the initial page load.\n if 'X-tor2web' in request.headers:\n flash(Markup(gettext(\n- '<strong>WARNING:</strong> You appear to be using Tor2Web. '\n- 'This <strong>does not</strong> provide anonymity. '\n+ '<strong>WARNING: </strong> '\n+ 'You appear to be using Tor2Web. '\n+ 'This <strong> does not </strong> '\n+ 'provide anonymity. '\n '<a href=\"{url}\">Why is this dangerous?</a>')\n .format(url=url_for('info.tor2web_warning'))),\n \"banner-warning\")\n", "issue": "Tor2Web warning does not display well\n# Bug\r\n\r\n## Description\r\n\r\nThe <strong> tags are incorrectly set.\r\n\r\n## Steps to Reproduce\r\n\r\nfirefox https://zdf4nikyuswdzbt6.onion.to/\r\n\r\n## Expected Behavior\r\n\r\nSpaces between words\r\n\r\n## Actual Behavior\r\n\r\nMissing spaces and weird bold / regular changes.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from datetime import datetime, timedelta\nfrom flask import (Flask, render_template, flash, Markup, request, g, session,\n url_for, redirect)\nfrom flask_babel import gettext\nfrom flask_assets import Environment\nfrom flask_wtf.csrf import CSRFProtect, CSRFError\nfrom jinja2 import evalcontextfilter\nfrom os import path\nfrom sqlalchemy.orm.exc import NoResultFound\n\nimport crypto_util\nimport i18n\nimport store\nimport template_filters\nimport version\n\nfrom db import Source, db_session\nfrom request_that_secures_file_uploads import RequestThatSecuresFileUploads\nfrom source_app import main, info, api\nfrom source_app.decorators import ignore_static\nfrom source_app.utils import logged_in\n\n\ndef create_app(config):\n app = Flask(__name__,\n template_folder=config.SOURCE_TEMPLATES_DIR,\n static_folder=path.join(config.SECUREDROP_ROOT, 'static'))\n app.request_class = RequestThatSecuresFileUploads\n app.config.from_object(config.SourceInterfaceFlaskConfig)\n\n # The default CSRF token expiration is 1 hour. 
Since large uploads can\n # take longer than an hour over Tor, we increase the valid window to 24h.\n app.config['WTF_CSRF_TIME_LIMIT'] = 60 * 60 * 24\n\n CSRFProtect(app)\n\n @app.errorhandler(CSRFError)\n def handle_csrf_error(e):\n msg = render_template('session_timeout.html')\n session.clear()\n flash(Markup(msg), \"important\")\n return redirect(url_for('main.index'))\n\n assets = Environment(app)\n app.config['assets'] = assets\n\n i18n.setup_app(app)\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n app.jinja_env.globals['version'] = version.__version__\n if getattr(config, 'CUSTOM_HEADER_IMAGE', None):\n app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE\n app.jinja_env.globals['use_custom_header_image'] = True\n else:\n app.jinja_env.globals['header_image'] = 'logo.png'\n app.jinja_env.globals['use_custom_header_image'] = False\n\n app.jinja_env.filters['rel_datetime_format'] = \\\n template_filters.rel_datetime_format\n app.jinja_env.filters['nl2br'] = evalcontextfilter(template_filters.nl2br)\n app.jinja_env.filters['filesizeformat'] = template_filters.filesizeformat\n\n for module in [main, info, api]:\n app.register_blueprint(module.make_blueprint(config))\n\n @app.before_request\n @ignore_static\n def check_tor2web():\n # ignore_static here so we only flash a single message warning\n # about Tor2Web, corresponding to the initial page load.\n if 'X-tor2web' in request.headers:\n flash(Markup(gettext(\n '<strong>WARNING:</strong> You appear to be using Tor2Web. '\n 'This <strong>does not</strong> provide anonymity. '\n '<a href=\"{url}\">Why is this dangerous?</a>')\n .format(url=url_for('info.tor2web_warning'))),\n \"banner-warning\")\n\n @app.before_request\n @ignore_static\n def setup_g():\n \"\"\"Store commonly used values in Flask's special g object\"\"\"\n g.locale = i18n.get_locale()\n g.text_direction = i18n.get_text_direction(g.locale)\n g.html_lang = i18n.locale_to_rfc_5646(g.locale)\n g.locales = i18n.get_locale2name()\n\n if 'expires' in session and datetime.utcnow() >= session['expires']:\n msg = render_template('session_timeout.html')\n\n # clear the session after we render the message so it's localized\n session.clear()\n\n flash(Markup(msg), \"important\")\n\n session['expires'] = datetime.utcnow() + \\\n timedelta(minutes=getattr(config,\n 'SESSION_EXPIRATION_MINUTES',\n 120))\n\n # ignore_static here because `crypto_util.hash_codename` is scrypt\n # (very time consuming), and we don't need to waste time running if\n # we're just serving a static resource that won't need to access\n # these common values.\n if logged_in():\n g.codename = session['codename']\n g.filesystem_id = crypto_util.hash_codename(g.codename)\n try:\n g.source = Source.query \\\n .filter(Source.filesystem_id == g.filesystem_id) \\\n .one()\n except NoResultFound as e:\n app.logger.error(\n \"Found no Sources when one was expected: %s\" %\n (e,))\n del session['logged_in']\n del session['codename']\n return redirect(url_for('main.index'))\n g.loc = store.path(g.filesystem_id)\n\n @app.teardown_appcontext\n def shutdown_session(exception=None):\n \"\"\"Automatically remove database sessions at the end of the request, or\n when the application shuts down\"\"\"\n db_session.remove()\n\n @app.errorhandler(404)\n def page_not_found(error):\n return render_template('notfound.html'), 404\n\n @app.errorhandler(500)\n def internal_error(error):\n return render_template('error.html'), 500\n\n return app\n", "path": "securedrop/source_app/__init__.py"}]}
| 2,150 | 214 |
gh_patches_debug_32731
|
rasdani/github-patches
|
git_diff
|
microsoft__ptvsd-1454
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Stop on entry hangs the debugger
## Environment data
- PTVSD version: master
- OS and version: windows
- Python version (& distribution if applicable, e.g. Anaconda): 3.6
- Using VS Code or Visual Studio: VS or VSC
## Actual behavior
Note that you have to change the default for STOP_ON_ENTRY in wrapper.py#1198 to True.
Repros almost always on VS, but infrequently on VSC.
## Expected behavior
Should run and break on entry
## Steps to reproduce:
For VS:
1. Change wrapper.py:1198 default for STOP_ON_ENTRY to True.
2. Use F5 to start the debugger.
For VSC:
1. Set `stopOnEntry` in launch json.
</issue>
<code>
[start of src/ptvsd/debugger.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 import sys
6
7 import ptvsd.log
8 from ptvsd._local import run_module, run_file, run_main
9
10
11 # TODO: not needed?
12 DONT_DEBUG = []
13
14 LOCALHOST = 'localhost'
15
16 RUNNERS = {
17 'module': run_module, # python -m spam
18 'script': run_file, # python spam.py
19 'code': run_file, # python -c 'print("spam")'
20 None: run_file, # catchall
21 }
22
23
24 def debug(filename, port_num, debug_id, debug_options, run_as,
25 _runners=RUNNERS, _extra=None, *args, **kwargs):
26
27 ptvsd.log.to_file()
28 ptvsd.log.info('debug{0!r}', (filename, port_num, debug_id, debug_options, run_as))
29
30 if _extra is None:
31 _extra = sys.argv[1:]
32 address = (LOCALHOST, port_num)
33 try:
34 run = _runners[run_as]
35 except KeyError:
36 # TODO: fail?
37 run = _runners[None]
38 if _extra:
39 args = _extra + list(args)
40 kwargs.setdefault('singlesession', True)
41 run(address, filename, *args, **kwargs)
42
43
44 def run(filename, port_num, run_as,
45 *args, **kwargs):
46
47 ptvsd.log.to_file()
48 ptvsd.log.info('run{0!r}', (filename, port_num, run_as))
49
50 address = (LOCALHOST, port_num)
51 run_main(address, filename, run_as, *args, **kwargs)
52
[end of src/ptvsd/debugger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/ptvsd/debugger.py b/src/ptvsd/debugger.py
--- a/src/ptvsd/debugger.py
+++ b/src/ptvsd/debugger.py
@@ -2,50 +2,38 @@
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
-import sys
-
import ptvsd.log
-from ptvsd._local import run_module, run_file, run_main
-
-
-# TODO: not needed?
-DONT_DEBUG = []
+import ptvsd.options
+from ptvsd.__main__ import run_file, run_module, run_code
-LOCALHOST = 'localhost'
RUNNERS = {
- 'module': run_module, # python -m spam
- 'script': run_file, # python spam.py
- 'code': run_file, # python -c 'print("spam")'
- None: run_file, # catchall
+ 'module': run_module,
+ 'script': run_file,
+ 'code': run_code,
}
+# Not actually used, but VS will try to add entries to it.
+DONT_DEBUG = []
-def debug(filename, port_num, debug_id, debug_options, run_as,
- _runners=RUNNERS, _extra=None, *args, **kwargs):
+# A legacy entrypoint for Visual Studio, to allow older versions to work with new ptvsd.
+# All new code should use the entrypoints in __main__ directly.
+def debug(filename, port_num, debug_id, debug_options, run_as):
ptvsd.log.to_file()
ptvsd.log.info('debug{0!r}', (filename, port_num, debug_id, debug_options, run_as))
- if _extra is None:
- _extra = sys.argv[1:]
- address = (LOCALHOST, port_num)
try:
- run = _runners[run_as]
+ run = RUNNERS[run_as]
except KeyError:
- # TODO: fail?
- run = _runners[None]
- if _extra:
- args = _extra + list(args)
- kwargs.setdefault('singlesession', True)
- run(address, filename, *args, **kwargs)
-
+ raise ValueError('run_as must be one of: {0!r}'.format(tuple(RUNNERS.keys())))
-def run(filename, port_num, run_as,
- *args, **kwargs):
+ ptvsd.options.target_kind = 'file' if run_as == 'script' else run_as
+ ptvsd.options.target = filename
+ ptvsd.options.port = port_num
+ ptvsd.options.client = True
- ptvsd.log.to_file()
- ptvsd.log.info('run{0!r}', (filename, port_num, run_as))
+ # debug_id is ignored because it has no meaning in DAP.
+ # debug_options are ignored, because they will be passed later via DAP "launch" request.
- address = (LOCALHOST, port_num)
- run_main(address, filename, run_as, *args, **kwargs)
+ run()
|
{"golden_diff": "diff --git a/src/ptvsd/debugger.py b/src/ptvsd/debugger.py\n--- a/src/ptvsd/debugger.py\n+++ b/src/ptvsd/debugger.py\n@@ -2,50 +2,38 @@\n # Licensed under the MIT License. See LICENSE in the project root\n # for license information.\n \n-import sys\n-\n import ptvsd.log\n-from ptvsd._local import run_module, run_file, run_main\n-\n-\n-# TODO: not needed?\n-DONT_DEBUG = []\n+import ptvsd.options\n+from ptvsd.__main__ import run_file, run_module, run_code\n \n-LOCALHOST = 'localhost'\n \n RUNNERS = {\n- 'module': run_module, # python -m spam\n- 'script': run_file, # python spam.py\n- 'code': run_file, # python -c 'print(\"spam\")'\n- None: run_file, # catchall\n+ 'module': run_module,\n+ 'script': run_file,\n+ 'code': run_code,\n }\n \n+# Not actually used, but VS will try to add entries to it.\n+DONT_DEBUG = []\n \n-def debug(filename, port_num, debug_id, debug_options, run_as,\n- _runners=RUNNERS, _extra=None, *args, **kwargs):\n \n+# A legacy entrypoint for Visual Studio, to allow older versions to work with new ptvsd.\n+# All new code should use the entrypoints in __main__ directly.\n+def debug(filename, port_num, debug_id, debug_options, run_as):\n ptvsd.log.to_file()\n ptvsd.log.info('debug{0!r}', (filename, port_num, debug_id, debug_options, run_as))\n \n- if _extra is None:\n- _extra = sys.argv[1:]\n- address = (LOCALHOST, port_num)\n try:\n- run = _runners[run_as]\n+ run = RUNNERS[run_as]\n except KeyError:\n- # TODO: fail?\n- run = _runners[None]\n- if _extra:\n- args = _extra + list(args)\n- kwargs.setdefault('singlesession', True)\n- run(address, filename, *args, **kwargs)\n-\n+ raise ValueError('run_as must be one of: {0!r}'.format(tuple(RUNNERS.keys())))\n \n-def run(filename, port_num, run_as,\n- *args, **kwargs):\n+ ptvsd.options.target_kind = 'file' if run_as == 'script' else run_as\n+ ptvsd.options.target = filename\n+ ptvsd.options.port = port_num\n+ ptvsd.options.client = True\n \n- ptvsd.log.to_file()\n- ptvsd.log.info('run{0!r}', (filename, port_num, run_as))\n+ # debug_id is ignored because it has no meaning in DAP.\n+ # debug_options are ignored, because they will be passed later via DAP \"launch\" request.\n \n- address = (LOCALHOST, port_num)\n- run_main(address, filename, run_as, *args, **kwargs)\n+ run()\n", "issue": "Stop on entry hangs the debugger\n## Environment data\r\n\r\n- PTVSD version: master\r\n- OS and version: windows\r\n- Python version (& distribution if applicable, e.g. Anaconda): 3.6\r\n- Using VS Code or Visual Studio: VS or VSC\r\n\r\n## Actual behavior\r\n\r\nNote that you have to change the default for STOP_ON_ENTRY in wrapper.py#1198 to True.\r\nRepros almost always on VS, but infrequently on VSC.\r\n\r\n## Expected behavior\r\n\r\nShould run and break on entry\r\n\r\n## Steps to reproduce:\r\nFor VS:\r\n1. Change wrapper.py:1198 default for STOP_ON_ENTRY to True.\r\n2. Use F5 to start the debugger.\r\n\r\nFor VSC:\r\n1. Set `stopOnEntry` in launch json.\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See LICENSE in the project root\n# for license information.\n\nimport sys\n\nimport ptvsd.log\nfrom ptvsd._local import run_module, run_file, run_main\n\n\n# TODO: not needed?\nDONT_DEBUG = []\n\nLOCALHOST = 'localhost'\n\nRUNNERS = {\n 'module': run_module, # python -m spam\n 'script': run_file, # python spam.py\n 'code': run_file, # python -c 'print(\"spam\")'\n None: run_file, # catchall\n}\n\n\ndef debug(filename, port_num, debug_id, debug_options, run_as,\n _runners=RUNNERS, _extra=None, *args, **kwargs):\n\n ptvsd.log.to_file()\n ptvsd.log.info('debug{0!r}', (filename, port_num, debug_id, debug_options, run_as))\n\n if _extra is None:\n _extra = sys.argv[1:]\n address = (LOCALHOST, port_num)\n try:\n run = _runners[run_as]\n except KeyError:\n # TODO: fail?\n run = _runners[None]\n if _extra:\n args = _extra + list(args)\n kwargs.setdefault('singlesession', True)\n run(address, filename, *args, **kwargs)\n\n\ndef run(filename, port_num, run_as,\n *args, **kwargs):\n\n ptvsd.log.to_file()\n ptvsd.log.info('run{0!r}', (filename, port_num, run_as))\n\n address = (LOCALHOST, port_num)\n run_main(address, filename, run_as, *args, **kwargs)\n", "path": "src/ptvsd/debugger.py"}]}
| 1,190 | 706 |
gh_patches_debug_17318
|
rasdani/github-patches
|
git_diff
|
nvaccess__nvda-13963
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Windows versions: let NVDA recognize Windows 10/11 Version 22H2
### Background:
As of November 2021, Microsoft has moved to annual update cycle for Windows 10 and 11. This means there is no YYH1 updates, making it a bit easier to predict what the next version of Windows 10 and 11 will be. This also incresaes uncertainty somewhat as feature update builds can be a bit unpredictable:
* Windows 10: Windows 10 is in maintenance mode (Vibranium releases). While people can guess what the build number can be (see below), we don't know when the final feature update will be released (I'm guessing 2023 based on support lifecycle for Home/Pro versus Enterprise/Education).
* Windows 11: build prediction is unpredictable as Microsoft can sign off on a build some time in northern spring with refresh branches used to prepare cumulative updates for the base build (compare co_release vs. co_refresh branches from last year).
### Is your feature request related to a problem? Please describe.
With the impending release of Windows 10 and 11 Version 22H2, let NVDA recognize 22H2 builds. This can aid in detecting Sun Valley 2 (SV2) builds for UIA console and other work.
### Describe the solution you'd like
Recognize the following builds:
* Windows 10 Version 22H2: 19045
* Windows 11 Version 22H2 (nickel/SV2): 22621
In addition, update the AppX manifest to state that the last tested version is 10.0.22621.0 and ask folks to use Windows 11 SDK build 22621 (now available).
### Describe alternatives you've considered
Leave things as is.
### Additional context
Typically, new Windows builds are recognized once builds move to release preview channel. For Windows 11, this took place on June 7, 2022, and for Windows 10, it is being worked on internally.
### Pull request strategy:
Choose from one of the following paths assuming that a PR is created:
1. Merge the upcoming PR.
2. Wait until Windows 10 Version 22H2 is released to release preview Insiders before merging the upcoming PR.
Thanks.
</issue>
<code>
[start of source/winVersion.py]
1 # A part of NonVisual Desktop Access (NVDA)
2 # Copyright (C) 2006-2022 NV Access Limited, Bill Dengler, Joseph Lee
3 # This file is covered by the GNU General Public License.
4 # See the file COPYING for more details.
5
6 """A module used to record Windows versions.
7 It is also used to define feature checks such as
8 making sure NVDA can run on a minimum supported version of Windows.
9 """
10
11 from typing import Optional
12 import sys
13 import os
14 import functools
15 import winreg
16
17
18 # Records a mapping between Windows builds and release names.
19 # These include build 10240 for Windows 10 1507 and releases with multiple release builds.
20 # These are applicable to Windows 10 and later as they report the same system version (10.0).
21 _BUILDS_TO_RELEASE_NAMES = {
22 10240: "Windows 10 1507",
23 10586: "Windows 10 1511",
24 14393: "Windows 10 1607",
25 15063: "Windows 10 1703",
26 16299: "Windows 10 1709",
27 17134: "Windows 10 1803",
28 17763: "Windows 10 1809",
29 18362: "Windows 10 1903",
30 18363: "Windows 10 1909",
31 19041: "Windows 10 2004",
32 19042: "Windows 10 20H2",
33 19043: "Windows 10 21H1",
34 19044: "Windows 10 21H2",
35 20348: "Windows Server 2022",
36 22000: "Windows 11 21H2",
37 }
38
39
40 @functools.lru_cache(maxsize=1)
41 def _getRunningVersionNameFromWinReg() -> str:
42 """Returns the Windows release name defined in Windows Registry.
43 This is applicable on Windows 10 Version 1511 (build 10586) and later.
44 """
45 # Cache the version in use on the system.
46 with winreg.OpenKey(
47 winreg.HKEY_LOCAL_MACHINE, r"Software\Microsoft\Windows NT\CurrentVersion"
48 ) as currentVersion:
49 # Version 20H2 and later where a separate display version string is used.
50 try:
51 releaseId = winreg.QueryValueEx(currentVersion, "DisplayVersion")[0]
52 except OSError:
53 # Don't set anything if this is Windows 10 1507 or earlier.
54 try:
55 releaseId = winreg.QueryValueEx(currentVersion, "ReleaseID")[0]
56 except OSError:
57 raise RuntimeError(
58 "Release name is not recorded in Windows Registry on this version of Windows"
59 ) from None
60 return releaseId
61
62
63 @functools.total_ordering
64 class WinVersion(object):
65 """
66 Represents a Windows release.
67 Includes version major, minor, build, service pack information,
68 as well as tools such as checking for specific Windows 10 releases.
69 """
70
71 def __init__(
72 self,
73 major: int = 0,
74 minor: int = 0,
75 build: int = 0,
76 releaseName: Optional[str] = None,
77 servicePack: str = "",
78 productType: str = ""
79 ):
80 self.major = major
81 self.minor = minor
82 self.build = build
83 if releaseName:
84 self.releaseName = releaseName
85 else:
86 self.releaseName = self._getWindowsReleaseName()
87 self.servicePack = servicePack
88 self.productType = productType
89
90 def _getWindowsReleaseName(self) -> str:
91 """Returns the public release name for a given Windows release based on major, minor, and build.
92 This is useful if release names are not defined when constructing this class.
93 For example, 6.1 will return 'Windows 7'.
94 For Windows 10, feature update release name will be included.
95 On server systems, unless noted otherwise, client release names will be returned.
96 For example, 'Windows 10 1809' will be returned on Server 2019 systems.
97 """
98 if (self.major, self.minor) == (6, 1):
99 return "Windows 7"
100 elif (self.major, self.minor) == (6, 2):
101 return "Windows 8"
102 elif (self.major, self.minor) == (6, 3):
103 return "Windows 8.1"
104 elif self.major == 10:
105 # From Version 1511 (build 10586), release Id/display version comes from Windows Registry.
106 # However there are builds with no release name (Version 1507/10240)
107 # or releases with different builds.
108 # Look these up first before asking Windows Registry.
109 if self.build in _BUILDS_TO_RELEASE_NAMES:
110 return _BUILDS_TO_RELEASE_NAMES[self.build]
111 return "Windows 10 unknown"
112 else:
113 return "Windows release unknown"
114
115 def __repr__(self):
116 winVersionText = [self.releaseName]
117 winVersionText.append(f"({self.major}.{self.minor}.{self.build})")
118 if self.servicePack != "":
119 winVersionText.append(f"service pack {self.servicePack}")
120 if self.productType != "":
121 winVersionText.append(self.productType)
122 return " ".join(winVersionText)
123
124 def __eq__(self, other):
125 return (
126 (self.major, self.minor, self.build)
127 == (other.major, other.minor, other.build)
128 )
129
130 def __ge__(self, other):
131 return (
132 (self.major, self.minor, self.build)
133 >= (other.major, other.minor, other.build)
134 )
135
136
137 # Windows releases to WinVersion instances for easing comparisons.
138 WIN7 = WinVersion(major=6, minor=1, build=7600)
139 WIN7_SP1 = WinVersion(major=6, minor=1, build=7601, servicePack="1")
140 WIN8 = WinVersion(major=6, minor=2, build=9200)
141 WIN81 = WinVersion(major=6, minor=3, build=9600)
142 WIN10 = WIN10_1507 = WinVersion(major=10, minor=0, build=10240)
143 WIN10_1511 = WinVersion(major=10, minor=0, build=10586)
144 WIN10_1607 = WinVersion(major=10, minor=0, build=14393)
145 WIN10_1703 = WinVersion(major=10, minor=0, build=15063)
146 WIN10_1709 = WinVersion(major=10, minor=0, build=16299)
147 WIN10_1803 = WinVersion(major=10, minor=0, build=17134)
148 WIN10_1809 = WinVersion(major=10, minor=0, build=17763)
149 WIN10_1903 = WinVersion(major=10, minor=0, build=18362)
150 WIN10_1909 = WinVersion(major=10, minor=0, build=18363)
151 WIN10_2004 = WinVersion(major=10, minor=0, build=19041)
152 WIN10_20H2 = WinVersion(major=10, minor=0, build=19042)
153 WIN10_21H1 = WinVersion(major=10, minor=0, build=19043)
154 WIN10_21H2 = WinVersion(major=10, minor=0, build=19044)
155 WINSERVER_2022 = WinVersion(major=10, minor=0, build=20348)
156 WIN11 = WIN11_21H2 = WinVersion(major=10, minor=0, build=22000)
157
158
159 @functools.lru_cache(maxsize=1)
160 def getWinVer():
161 """Returns a record of current Windows version NVDA is running on.
162 """
163 winVer = sys.getwindowsversion()
164 # #12509: on Windows 10, fetch whatever Windows Registry says for the current build.
165 # #12626: note that not all Windows 10 releases are labeled "Windows 10"
166 # (build 22000 is Windows 11 despite major.minor being 10.0).
167 try:
168 if WinVersion(
169 major=winVer.major,
170 minor=winVer.minor,
171 build=winVer.build
172 ) >= WIN11:
173 releaseName = f"Windows 11 {_getRunningVersionNameFromWinReg()}"
174 else:
175 releaseName = f"Windows 10 {_getRunningVersionNameFromWinReg()}"
176 except RuntimeError:
177 releaseName = None
178 return WinVersion(
179 major=winVer.major,
180 minor=winVer.minor,
181 build=winVer.build,
182 releaseName=releaseName,
183 servicePack=winVer.service_pack,
184 productType=("workstation", "domain controller", "server")[winVer.product_type - 1]
185 )
186
187
188 def isSupportedOS():
189 # NVDA can only run on Windows 7 Service pack 1 and above
190 return getWinVer() >= WIN7_SP1
191
192
193 UWP_OCR_DATA_PATH = os.path.expandvars(r"$windir\OCR")
194
195
196 def isUwpOcrAvailable():
197 return os.path.isdir(UWP_OCR_DATA_PATH)
198
199
200 def isFullScreenMagnificationAvailable() -> bool:
201 """
202 Technically this is always False. The Magnification API has been marked by MS as unsupported for
203 WOW64 applications such as NVDA. For our usages, support has been added since Windows 8, relying on our
204 testing our specific usage of the API with each Windows version since Windows 8
205 """
206 return getWinVer() >= WIN8
207
[end of source/winVersion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/source/winVersion.py b/source/winVersion.py
--- a/source/winVersion.py
+++ b/source/winVersion.py
@@ -32,6 +32,7 @@
19042: "Windows 10 20H2",
19043: "Windows 10 21H1",
19044: "Windows 10 21H2",
+ 19045: "Windows 10 22H2",
20348: "Windows Server 2022",
22000: "Windows 11 21H2",
}
@@ -152,6 +153,7 @@
WIN10_20H2 = WinVersion(major=10, minor=0, build=19042)
WIN10_21H1 = WinVersion(major=10, minor=0, build=19043)
WIN10_21H2 = WinVersion(major=10, minor=0, build=19044)
+WIN10_22H2 = WinVersion(major=10, minor=0, build=19045)
WINSERVER_2022 = WinVersion(major=10, minor=0, build=20348)
WIN11 = WIN11_21H2 = WinVersion(major=10, minor=0, build=22000)
|
{"golden_diff": "diff --git a/source/winVersion.py b/source/winVersion.py\n--- a/source/winVersion.py\n+++ b/source/winVersion.py\n@@ -32,6 +32,7 @@\n \t19042: \"Windows 10 20H2\",\r\n \t19043: \"Windows 10 21H1\",\r\n \t19044: \"Windows 10 21H2\",\r\n+\t19045: \"Windows 10 22H2\",\r\n \t20348: \"Windows Server 2022\",\r\n \t22000: \"Windows 11 21H2\",\r\n }\r\n@@ -152,6 +153,7 @@\n WIN10_20H2 = WinVersion(major=10, minor=0, build=19042)\r\n WIN10_21H1 = WinVersion(major=10, minor=0, build=19043)\r\n WIN10_21H2 = WinVersion(major=10, minor=0, build=19044)\r\n+WIN10_22H2 = WinVersion(major=10, minor=0, build=19045)\r\n WINSERVER_2022 = WinVersion(major=10, minor=0, build=20348)\r\n WIN11 = WIN11_21H2 = WinVersion(major=10, minor=0, build=22000)\n", "issue": "Windows versions: let NVDA recognize Windows 10/11 Version 22H2\n### Background:\r\nAs of November 2021, Microsoft has moved to annual update cycle for Windows 10 and 11. This means there is no YYH1 updates, making it a bit easier to predict what the next version of Windows 10 and 11 will be. This also incresaes uncertainty somewhat as feature update builds can be a bit unpredictable:\r\n\r\n* Windows 10: Windows 10 is in maintenance mode (Vibranium releases). While people can guess what the build number can be (see below), we don't know when the final feature update will be released (I'm guessing 2023 based on support lifecycle for Home/Pro versus Enterprise/Education).\r\n* Windows 11: build prediction is unpredictable as Microsoft can sign off on a build some time in northern spring with refresh branches used to prepare cumulative updates for the base build (compare co_release vs. co_refresh branches from last year).\r\n\r\n### Is your feature request related to a problem? Please describe.\r\nWith the impending release of Windows 10 and 11 Version 22H2, let NVDA recognize 22H2 builds. This can aid in detecting Sun Valley 2 (SV2) builds for UIA console and other work.\r\n\r\n### Describe the solution you'd like\r\nRecognize the following builds:\r\n\r\n* Windows 10 Version 22H2: 19045\r\n* Windows 11 Version 22H2 (nickel/SV2): 22621\r\n\r\nIn addition, update AppX manifest to state that the last tested version is 10.0.22621.0 and ask folks to use Windows 11 SDK build 22621 (now available).\r\n\r\n### Describe alternatives you've considered\r\nLeave things as is.\r\n\r\n### Additional context\r\nTypically, new Windows builds are recognized once builds move to release preview channel. For Windows 11, this took place on June 7, 2022, and for Windows 10, it is being worked on internally.\r\n\r\n### Pull request strategy:\r\nChoose from one of the following paths assuming that a PR is created:\r\n\r\n1. Merge the upcoming PR.\r\n2. 
Wait until Windows 10 Version 22H2 is released to release preview Insiders before merging the upcoming PR.\r\n\r\nThanks.\n", "before_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2006-2022 NV Access Limited, Bill Dengler, Joseph Lee\r\n# This file is covered by the GNU General Public License.\r\n# See the file COPYING for more details.\r\n\r\n\"\"\"A module used to record Windows versions.\r\nIt is also used to define feature checks such as\r\nmaking sure NVDA can run on a minimum supported version of Windows.\r\n\"\"\"\r\n\r\nfrom typing import Optional\r\nimport sys\r\nimport os\r\nimport functools\r\nimport winreg\r\n\r\n\r\n# Records a mapping between Windows builds and release names.\r\n# These include build 10240 for Windows 10 1507 and releases with multiple release builds.\r\n# These are applicable to Windows 10 and later as they report the same system version (10.0).\r\n_BUILDS_TO_RELEASE_NAMES = {\r\n\t10240: \"Windows 10 1507\",\r\n\t10586: \"Windows 10 1511\",\r\n\t14393: \"Windows 10 1607\",\r\n\t15063: \"Windows 10 1703\",\r\n\t16299: \"Windows 10 1709\",\r\n\t17134: \"Windows 10 1803\",\r\n\t17763: \"Windows 10 1809\",\r\n\t18362: \"Windows 10 1903\",\r\n\t18363: \"Windows 10 1909\",\r\n\t19041: \"Windows 10 2004\",\r\n\t19042: \"Windows 10 20H2\",\r\n\t19043: \"Windows 10 21H1\",\r\n\t19044: \"Windows 10 21H2\",\r\n\t20348: \"Windows Server 2022\",\r\n\t22000: \"Windows 11 21H2\",\r\n}\r\n\r\n\r\[email protected]_cache(maxsize=1)\r\ndef _getRunningVersionNameFromWinReg() -> str:\r\n\t\"\"\"Returns the Windows release name defined in Windows Registry.\r\n\tThis is applicable on Windows 10 Version 1511 (build 10586) and later.\r\n\t\"\"\"\r\n\t# Cache the version in use on the system.\r\n\twith winreg.OpenKey(\r\n\t\twinreg.HKEY_LOCAL_MACHINE, r\"Software\\Microsoft\\Windows NT\\CurrentVersion\"\r\n\t) as currentVersion:\r\n\t\t# Version 20H2 and later where a separate display version string is used.\r\n\t\ttry:\r\n\t\t\treleaseId = winreg.QueryValueEx(currentVersion, \"DisplayVersion\")[0]\r\n\t\texcept OSError:\r\n\t\t\t# Don't set anything if this is Windows 10 1507 or earlier.\r\n\t\t\ttry:\r\n\t\t\t\treleaseId = winreg.QueryValueEx(currentVersion, \"ReleaseID\")[0]\r\n\t\t\texcept OSError:\r\n\t\t\t\traise RuntimeError(\r\n\t\t\t\t\t\"Release name is not recorded in Windows Registry on this version of Windows\"\r\n\t\t\t\t) from None\r\n\treturn releaseId\r\n\r\n\r\[email protected]_ordering\r\nclass WinVersion(object):\r\n\t\"\"\"\r\n\tRepresents a Windows release.\r\n\tIncludes version major, minor, build, service pack information,\r\n\tas well as tools such as checking for specific Windows 10 releases.\r\n\t\"\"\"\r\n\r\n\tdef __init__(\r\n\t\t\tself,\r\n\t\t\tmajor: int = 0,\r\n\t\t\tminor: int = 0,\r\n\t\t\tbuild: int = 0,\r\n\t\t\treleaseName: Optional[str] = None,\r\n\t\t\tservicePack: str = \"\",\r\n\t\t\tproductType: str = \"\"\r\n\t):\r\n\t\tself.major = major\r\n\t\tself.minor = minor\r\n\t\tself.build = build\r\n\t\tif releaseName:\r\n\t\t\tself.releaseName = releaseName\r\n\t\telse:\r\n\t\t\tself.releaseName = self._getWindowsReleaseName()\r\n\t\tself.servicePack = servicePack\r\n\t\tself.productType = productType\r\n\r\n\tdef _getWindowsReleaseName(self) -> str:\r\n\t\t\"\"\"Returns the public release name for a given Windows release based on major, minor, and build.\r\n\t\tThis is useful if release names are not defined when constructing this class.\r\n\t\tFor example, 6.1 will return 'Windows 7'.\r\n\t\tFor Windows 
10, feature update release name will be included.\r\n\t\tOn server systems, unless noted otherwise, client release names will be returned.\r\n\t\tFor example, 'Windows 10 1809' will be returned on Server 2019 systems.\r\n\t\t\"\"\"\r\n\t\tif (self.major, self.minor) == (6, 1):\r\n\t\t\treturn \"Windows 7\"\r\n\t\telif (self.major, self.minor) == (6, 2):\r\n\t\t\treturn \"Windows 8\"\r\n\t\telif (self.major, self.minor) == (6, 3):\r\n\t\t\treturn \"Windows 8.1\"\r\n\t\telif self.major == 10:\r\n\t\t\t# From Version 1511 (build 10586), release Id/display version comes from Windows Registry.\r\n\t\t\t# However there are builds with no release name (Version 1507/10240)\r\n\t\t\t# or releases with different builds.\r\n\t\t\t# Look these up first before asking Windows Registry.\r\n\t\t\tif self.build in _BUILDS_TO_RELEASE_NAMES:\r\n\t\t\t\treturn _BUILDS_TO_RELEASE_NAMES[self.build]\r\n\t\t\treturn \"Windows 10 unknown\"\r\n\t\telse:\r\n\t\t\treturn \"Windows release unknown\"\r\n\r\n\tdef __repr__(self):\r\n\t\twinVersionText = [self.releaseName]\r\n\t\twinVersionText.append(f\"({self.major}.{self.minor}.{self.build})\")\r\n\t\tif self.servicePack != \"\":\r\n\t\t\twinVersionText.append(f\"service pack {self.servicePack}\")\r\n\t\tif self.productType != \"\":\r\n\t\t\twinVersionText.append(self.productType)\r\n\t\treturn \" \".join(winVersionText)\r\n\r\n\tdef __eq__(self, other):\r\n\t\treturn (\r\n\t\t\t(self.major, self.minor, self.build)\r\n\t\t\t== (other.major, other.minor, other.build)\r\n\t\t)\r\n\r\n\tdef __ge__(self, other):\r\n\t\treturn (\r\n\t\t\t(self.major, self.minor, self.build)\r\n\t\t\t>= (other.major, other.minor, other.build)\r\n\t\t)\r\n\r\n\r\n# Windows releases to WinVersion instances for easing comparisons.\r\nWIN7 = WinVersion(major=6, minor=1, build=7600)\r\nWIN7_SP1 = WinVersion(major=6, minor=1, build=7601, servicePack=\"1\")\r\nWIN8 = WinVersion(major=6, minor=2, build=9200)\r\nWIN81 = WinVersion(major=6, minor=3, build=9600)\r\nWIN10 = WIN10_1507 = WinVersion(major=10, minor=0, build=10240)\r\nWIN10_1511 = WinVersion(major=10, minor=0, build=10586)\r\nWIN10_1607 = WinVersion(major=10, minor=0, build=14393)\r\nWIN10_1703 = WinVersion(major=10, minor=0, build=15063)\r\nWIN10_1709 = WinVersion(major=10, minor=0, build=16299)\r\nWIN10_1803 = WinVersion(major=10, minor=0, build=17134)\r\nWIN10_1809 = WinVersion(major=10, minor=0, build=17763)\r\nWIN10_1903 = WinVersion(major=10, minor=0, build=18362)\r\nWIN10_1909 = WinVersion(major=10, minor=0, build=18363)\r\nWIN10_2004 = WinVersion(major=10, minor=0, build=19041)\r\nWIN10_20H2 = WinVersion(major=10, minor=0, build=19042)\r\nWIN10_21H1 = WinVersion(major=10, minor=0, build=19043)\r\nWIN10_21H2 = WinVersion(major=10, minor=0, build=19044)\r\nWINSERVER_2022 = WinVersion(major=10, minor=0, build=20348)\r\nWIN11 = WIN11_21H2 = WinVersion(major=10, minor=0, build=22000)\r\n\r\n\r\[email protected]_cache(maxsize=1)\r\ndef getWinVer():\r\n\t\"\"\"Returns a record of current Windows version NVDA is running on.\r\n\t\"\"\"\r\n\twinVer = sys.getwindowsversion()\r\n\t# #12509: on Windows 10, fetch whatever Windows Registry says for the current build.\r\n\t# #12626: note that not all Windows 10 releases are labeled \"Windows 10\"\r\n\t# (build 22000 is Windows 11 despite major.minor being 10.0).\r\n\ttry:\r\n\t\tif WinVersion(\r\n\t\t\tmajor=winVer.major,\r\n\t\t\tminor=winVer.minor,\r\n\t\t\tbuild=winVer.build\r\n\t\t) >= WIN11:\r\n\t\t\treleaseName = f\"Windows 11 
{_getRunningVersionNameFromWinReg()}\"\r\n\t\telse:\r\n\t\t\treleaseName = f\"Windows 10 {_getRunningVersionNameFromWinReg()}\"\r\n\texcept RuntimeError:\r\n\t\treleaseName = None\r\n\treturn WinVersion(\r\n\t\tmajor=winVer.major,\r\n\t\tminor=winVer.minor,\r\n\t\tbuild=winVer.build,\r\n\t\treleaseName=releaseName,\r\n\t\tservicePack=winVer.service_pack,\r\n\t\tproductType=(\"workstation\", \"domain controller\", \"server\")[winVer.product_type - 1]\r\n\t)\r\n\r\n\r\ndef isSupportedOS():\r\n\t# NVDA can only run on Windows 7 Service pack 1 and above\r\n\treturn getWinVer() >= WIN7_SP1\r\n\r\n\r\nUWP_OCR_DATA_PATH = os.path.expandvars(r\"$windir\\OCR\")\r\n\r\n\r\ndef isUwpOcrAvailable():\r\n\treturn os.path.isdir(UWP_OCR_DATA_PATH)\r\n\r\n\r\ndef isFullScreenMagnificationAvailable() -> bool:\r\n\t\"\"\"\r\n\tTechnically this is always False. The Magnification API has been marked by MS as unsupported for\r\n\tWOW64 applications such as NVDA. For our usages, support has been added since Windows 8, relying on our\r\n\ttesting our specific usage of the API with each Windows version since Windows 8\r\n\t\"\"\"\r\n\treturn getWinVer() >= WIN8\r\n", "path": "source/winVersion.py"}]}
| 3,977 | 346 |
gh_patches_debug_32318
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-210
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add case-insensitive kwarg to the regex matcher
It would be nice to specify case-insensitive matching in the regex matcher.
e.g.
```python
@match_regex(r'myregex', case_sensitive=False)
async def myskill(opsdroid, config, message):
pass
```
</issue>
<code>
[start of opsdroid/parsers/regex.py]
1 """A helper function for parsing and executing regex skills."""
2
3 import logging
4 import re
5
6
7 _LOGGER = logging.getLogger(__name__)
8
9
10 async def parse_regex(opsdroid, message):
11 """Parse a message against all regex skills."""
12 # pylint: disable=broad-except
13 # We want to catch all exceptions coming from a skill module and not
14 # halt the application. If a skill throws an exception it just doesn't
15 # give a response to the user, so an error response should be given.
16 for skill in opsdroid.skills:
17 if "regex" in skill:
18 regex = re.match(skill["regex"], message.text)
19 if regex:
20 message.regex = regex
21 try:
22 await skill["skill"](opsdroid, skill["config"], message)
23 except Exception:
24 await message.respond(
25 "Whoops there has been an error")
26 await message.respond(
27 "Check the log for details")
28 _LOGGER.exception("Exception when parsing '" +
29 message.text +
30 "' against skill '" +
31 skill["regex"] + "'")
32
[end of opsdroid/parsers/regex.py]
[start of opsdroid/matchers.py]
1 """Decorator functions to use when creating skill modules."""
2
3 import logging
4
5 from opsdroid.helper import get_opsdroid
6 from opsdroid.web import Web
7
8
9 _LOGGER = logging.getLogger(__name__)
10
11
12 def match_regex(regex):
13 """Return regex match decorator."""
14 def matcher(func):
15 """Add decorated function to skills list for regex matching."""
16 opsdroid = get_opsdroid()
17 opsdroid.skills.append({"regex": regex, "skill": func,
18 "config":
19 opsdroid.loader.current_import_config})
20 return func
21 return matcher
22
23
24 def match_apiai_action(action):
25 """Return apiai action match decorator."""
26 def matcher(func):
27 """Add decorated function to skills list for apiai matching."""
28 opsdroid = get_opsdroid()
29 opsdroid.skills.append({"apiai_action": action, "skill": func,
30 "config":
31 opsdroid.loader.current_import_config})
32 return func
33 return matcher
34
35
36 def match_apiai_intent(intent):
37 """Return apiai intent match decorator."""
38 def matcher(func):
39 """Add decorated function to skills list for apiai matching."""
40 opsdroid = get_opsdroid()
41 opsdroid.skills.append({"apiai_intent": intent, "skill": func,
42 "config":
43 opsdroid.loader.current_import_config})
44 return func
45 return matcher
46
47
48 def match_crontab(crontab, timezone=None):
49 """Return crontab match decorator."""
50 def matcher(func):
51 """Add decorated function to skills list for crontab matching."""
52 opsdroid = get_opsdroid()
53 config = opsdroid.loader.current_import_config
54 opsdroid.skills.append({"crontab": crontab, "skill": func,
55 "config": config, "timezone": timezone})
56 return func
57 return matcher
58
59
60 def match_webhook(webhook):
61 """Return webhook match decorator."""
62 def matcher(func):
63 """Add decorated function to skills list for webhook matching."""
64 opsdroid = get_opsdroid()
65 config = opsdroid.loader.current_import_config
66 opsdroid.skills.append({"webhook": webhook, "skill": func,
67 "config": config})
68
69 async def wrapper(req, opsdroid=opsdroid, config=config):
70 """Wrap up the aiohttp handler."""
71 _LOGGER.info("Running skill %s via webhook", webhook)
72 opsdroid.stats["webhooks_called"] = \
73 opsdroid.stats["webhooks_called"] + 1
74 await func(opsdroid, config, req)
75 return Web.build_response(200, {"called_skill": webhook})
76
77 opsdroid.web_server.web_app.router.add_post(
78 "/skill/{}/{}".format(config["name"], webhook), wrapper)
79 opsdroid.web_server.web_app.router.add_post(
80 "/skill/{}/{}/".format(config["name"], webhook), wrapper)
81
82 return func
83 return matcher
84
[end of opsdroid/matchers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opsdroid/matchers.py b/opsdroid/matchers.py
--- a/opsdroid/matchers.py
+++ b/opsdroid/matchers.py
@@ -9,12 +9,14 @@
_LOGGER = logging.getLogger(__name__)
-def match_regex(regex):
+def match_regex(regex, case_sensitive=True):
"""Return regex match decorator."""
def matcher(func):
"""Add decorated function to skills list for regex matching."""
opsdroid = get_opsdroid()
- opsdroid.skills.append({"regex": regex, "skill": func,
+ opsdroid.skills.append({"regex": {"expression": regex,
+ "case_sensitive": case_sensitive},
+ "skill": func,
"config":
opsdroid.loader.current_import_config})
return func
diff --git a/opsdroid/parsers/regex.py b/opsdroid/parsers/regex.py
--- a/opsdroid/parsers/regex.py
+++ b/opsdroid/parsers/regex.py
@@ -15,7 +15,12 @@
# give a response to the user, so an error response should be given.
for skill in opsdroid.skills:
if "regex" in skill:
- regex = re.match(skill["regex"], message.text)
+ if skill["regex"]["case_sensitive"]:
+ regex = re.match(skill["regex"]["expression"],
+ message.text)
+ else:
+ regex = re.match(skill["regex"]["expression"],
+ message.text, re.IGNORECASE)
if regex:
message.regex = regex
try:
@@ -28,4 +33,4 @@
_LOGGER.exception("Exception when parsing '" +
message.text +
"' against skill '" +
- skill["regex"] + "'")
+ skill["regex"]["expression"] + "'")
|
{"golden_diff": "diff --git a/opsdroid/matchers.py b/opsdroid/matchers.py\n--- a/opsdroid/matchers.py\n+++ b/opsdroid/matchers.py\n@@ -9,12 +9,14 @@\n _LOGGER = logging.getLogger(__name__)\n \n \n-def match_regex(regex):\n+def match_regex(regex, case_sensitive=True):\n \"\"\"Return regex match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for regex matching.\"\"\"\n opsdroid = get_opsdroid()\n- opsdroid.skills.append({\"regex\": regex, \"skill\": func,\n+ opsdroid.skills.append({\"regex\": {\"expression\": regex,\n+ \"case_sensitive\": case_sensitive},\n+ \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\ndiff --git a/opsdroid/parsers/regex.py b/opsdroid/parsers/regex.py\n--- a/opsdroid/parsers/regex.py\n+++ b/opsdroid/parsers/regex.py\n@@ -15,7 +15,12 @@\n # give a response to the user, so an error response should be given.\n for skill in opsdroid.skills:\n if \"regex\" in skill:\n- regex = re.match(skill[\"regex\"], message.text)\n+ if skill[\"regex\"][\"case_sensitive\"]:\n+ regex = re.match(skill[\"regex\"][\"expression\"],\n+ message.text)\n+ else:\n+ regex = re.match(skill[\"regex\"][\"expression\"],\n+ message.text, re.IGNORECASE)\n if regex:\n message.regex = regex\n try:\n@@ -28,4 +33,4 @@\n _LOGGER.exception(\"Exception when parsing '\" +\n message.text +\n \"' against skill '\" +\n- skill[\"regex\"] + \"'\")\n+ skill[\"regex\"][\"expression\"] + \"'\")\n", "issue": "Add case-insensitive kwarg to the regex matcher\nIt would be nice to specify case insensitive matching in the regex matcher.\r\n\r\ne.g\r\n```python\r\n@match_regex(r'myregex', case_sensitive=False)\r\nasync def myskill(opsdroid, config, message):\r\n pass\r\n```\n", "before_files": [{"content": "\"\"\"A helper function for parsing and executing regex skills.\"\"\"\n\nimport logging\nimport re\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def parse_regex(opsdroid, message):\n \"\"\"Parse a message against all regex skills.\"\"\"\n # pylint: disable=broad-except\n # We want to catch all exceptions coming from a skill module and not\n # halt the application. 
If a skill throws an exception it just doesn't\n # give a response to the user, so an error response should be given.\n for skill in opsdroid.skills:\n if \"regex\" in skill:\n regex = re.match(skill[\"regex\"], message.text)\n if regex:\n message.regex = regex\n try:\n await skill[\"skill\"](opsdroid, skill[\"config\"], message)\n except Exception:\n await message.respond(\n \"Whoops there has been an error\")\n await message.respond(\n \"Check the log for details\")\n _LOGGER.exception(\"Exception when parsing '\" +\n message.text +\n \"' against skill '\" +\n skill[\"regex\"] + \"'\")\n", "path": "opsdroid/parsers/regex.py"}, {"content": "\"\"\"Decorator functions to use when creating skill modules.\"\"\"\n\nimport logging\n\nfrom opsdroid.helper import get_opsdroid\nfrom opsdroid.web import Web\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef match_regex(regex):\n \"\"\"Return regex match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for regex matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"regex\": regex, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_apiai_action(action):\n \"\"\"Return apiai action match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for apiai matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"apiai_action\": action, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_apiai_intent(intent):\n \"\"\"Return apiai intent match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for apiai matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"apiai_intent\": intent, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_crontab(crontab, timezone=None):\n \"\"\"Return crontab match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for crontab matching.\"\"\"\n opsdroid = get_opsdroid()\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"crontab\": crontab, \"skill\": func,\n \"config\": config, \"timezone\": timezone})\n return func\n return matcher\n\n\ndef match_webhook(webhook):\n \"\"\"Return webhook match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for webhook matching.\"\"\"\n opsdroid = get_opsdroid()\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"webhook\": webhook, \"skill\": func,\n \"config\": config})\n\n async def wrapper(req, opsdroid=opsdroid, config=config):\n \"\"\"Wrap up the aiohttp handler.\"\"\"\n _LOGGER.info(\"Running skill %s via webhook\", webhook)\n opsdroid.stats[\"webhooks_called\"] = \\\n opsdroid.stats[\"webhooks_called\"] + 1\n await func(opsdroid, config, req)\n return Web.build_response(200, {\"called_skill\": webhook})\n\n opsdroid.web_server.web_app.router.add_post(\n \"/skill/{}/{}\".format(config[\"name\"], webhook), wrapper)\n opsdroid.web_server.web_app.router.add_post(\n \"/skill/{}/{}/\".format(config[\"name\"], webhook), wrapper)\n\n return func\n return matcher\n", "path": "opsdroid/matchers.py"}]}
| 1,702 | 404 |
gh_patches_debug_24300
|
rasdani/github-patches
|
git_diff
|
avocado-framework__avocado-3978
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
jobs get-output-files should split downloaded files into subdirectories
> ```
> ll /home/cleber/avocado/job-results/job-2020-06-04T15.17-9bd5b5e
> total 708
> -rw-rw-r--. 1 cleber cleber 41 Jun 4 15:17 id
> drwxrwxr-x. 2 cleber cleber 4096 Jun 4 15:17 jobdata
> -rw-rw-r--. 1 cleber cleber 15818 Jun 4 15:17 job.log
> -rw-rw-r--. 1 cleber cleber 668480 Jun 4 15:17 results.html
> -rw-rw-r--. 1 cleber cleber 1029 Jun 4 15:17 results.json
> -rw-rw-r--. 1 cleber cleber 36 Jun 4 15:17 results.tap
> -rw-rw-r--. 1 cleber cleber 345 Jun 4 15:17 results.xml
> drwxrwxr-x. 5 cleber cleber 4096 Jun 4 15:17 sysinfo
> -rw-rw-r--. 1 cleber cleber 16 Jun 4 15:18 test2_created
> -rw-rw-r--. 1 cleber cleber 16 Jun 4 15:18 test_created
> drwxrwxr-x. 4 cleber cleber 4096 Jun 4 15:17 test-results
> ```
>
> I had the expectation that, `test_created` would to to `test-results/1-_tmp_t.sh/` and `test2_created` would go to `test-results/2-_tmp_t2.sh/`. Is this expected now? Yes, no, in the future?
</issue>
<code>
[start of avocado/plugins/jobs.py]
1 # This program is free software; you can redistribute it and/or modify
2 # it under the terms of the GNU General Public License as published by
3 # the Free Software Foundation; either version 2 of the License, or
4 # (at your option) any later version.
5 #
6 # This program is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
9 #
10 # See LICENSE for more details.
11 #
12 # Copyright: Red Hat Inc. 2020
13 # Authors: Beraldo Leal <[email protected]>
14
15 """
16 Jobs subcommand
17 """
18 import json
19 import os
20
21 from datetime import datetime
22 from glob import glob
23
24 from avocado.core import exit_codes
25 from avocado.core import output
26 from avocado.core.data_dir import get_job_results_dir, get_logs_dir
27 from avocado.core.future.settings import settings
28 from avocado.core.output import LOG_UI
29 from avocado.core.plugin_interfaces import CLICmd
30 from avocado.core.spawners.exceptions import SpawnerException
31 from avocado.core.spawners.podman import PodmanSpawner
32 from avocado.core.spawners.process import ProcessSpawner
33 from avocado.utils import astring
34
35
36 class Jobs(CLICmd):
37 """
38 Implements the avocado 'jobs' subcommand
39 """
40 name = 'jobs'
41 description = 'Manage Avocado jobs'
42
43 def _get_data_from_file(self, filename):
44 if not filename or not os.path.isfile(filename):
45 raise FileNotFoundError('File not found {}'.format(filename))
46
47 with open(filename, 'r') as fp:
48 return json.load(fp)
49
50 def _print_job_details(self, details):
51 for key, value in details.items():
52 LOG_UI.info("%-12s: %s", key, value)
53
54 def _print_job_tests(self, tests):
55 test_matrix = []
56 date_fmt = "%Y/%m/%d %H:%M:%S"
57 for test in tests:
58 status = test.get('status')
59 decorator = output.TEST_STATUS_DECORATOR_MAPPING.get(status)
60 end = datetime.fromtimestamp(test.get('end'))
61 test_matrix.append((test.get('id'),
62 end.strftime(date_fmt),
63 "%5f" % float(test.get('time')),
64 decorator(status, '')))
65 header = (output.TERM_SUPPORT.header_str('Test ID'),
66 output.TERM_SUPPORT.header_str('End Time'),
67 output.TERM_SUPPORT.header_str('Run Time'),
68 output.TERM_SUPPORT.header_str('Status'))
69 for line in astring.iter_tabular_output(test_matrix,
70 header=header,
71 strip=True):
72 LOG_UI.debug(line)
73
74 def _save_stream_to_file(self, stream, filename):
75 """Save stream to a file.
76
77 Directory must exists before calling this function.
78 """
79 dirname = os.path.dirname(filename)
80 if not os.path.isdir(dirname):
81 LOG_UI.error("%s does not exist. Exiting...", dirname)
82 return exit_codes.AVOCADO_GENERIC_CRASH
83
84 with open(filename, 'ab') as output_file:
85 output_file.write(stream)
86
87 def configure(self, parser):
88 """
89 Add the subparser for the assets action.
90
91 :param parser: The Avocado command line application parser
92 :type parser: :class:`avocado.core.parser.ArgumentParser`
93 """
94 parser = super(Jobs, self).configure(parser)
95
96 subcommands = parser.add_subparsers(dest='jobs_subcommand',
97 metavar='sub-command')
98 subcommands.required = True
99
100 help_msg = 'List all known jobs by Avocado'
101 subcommands.add_parser('list', help=help_msg)
102
103 help_msg = ('Show details about a specific job. When passing a Job '
104 'ID, you can use any Job Reference (job_id, "latest", '
105 'or job results path).')
106 show_parser = subcommands.add_parser('show', help=help_msg)
107 settings.register_option(section='jobs.show',
108 key='job_id',
109 help_msg='JOB id',
110 metavar='JOBID',
111 default='latest',
112 nargs='?',
113 positional_arg=True,
114 parser=show_parser)
115 help_msg = ('Download output files generated by tests on '
116 'AVOCADO_TEST_OUTPUT_DIR')
117 output_files_parser = subcommands.add_parser('get-output-files',
118 help=help_msg)
119 settings.register_option(section='jobs.get.output_files',
120 key='job_id',
121 help_msg='JOB id',
122 metavar='JOBID',
123 default=None,
124 positional_arg=True,
125 parser=output_files_parser)
126
127 settings.register_option(section='jobs.get.output_files',
128 key='destination',
129 help_msg='Destination path',
130 metavar='DESTINATION',
131 default=None,
132 positional_arg=True,
133 parser=output_files_parser)
134
135 def handle_list_command(self, jobs_results):
136 """Called when 'avocado jobs list' command is executed."""
137
138 for filename in jobs_results.values():
139 with open(filename, 'r') as fp:
140 job = json.load(fp)
141 try:
142 started_ts = job['tests'][0]['start']
143 started = datetime.fromtimestamp(started_ts)
144 except IndexError:
145 continue
146 LOG_UI.info("%-40s %-26s %3s (%s/%s/%s/%s)",
147 job['job_id'],
148 str(started),
149 job['total'],
150 job['pass'],
151 job['skip'],
152 job['errors'],
153 job['failures'])
154
155 return exit_codes.AVOCADO_ALL_OK
156
157 def _download_tests(self, tests, destination, job_id, spawner):
158 for test in tests:
159 test_id = test.get('id')
160 LOG_UI.info("Downloading files for test %s", test_id)
161 try:
162 files_buffers = spawner().stream_output(job_id, test_id)
163 for filename, stream in files_buffers:
164 dest = os.path.join(destination, filename)
165 self._save_stream_to_file(stream, dest)
166 except SpawnerException as ex:
167 LOG_UI.error("Error: Failed to download: %s. Exiting...", ex)
168 return exit_codes.AVOCADO_GENERIC_CRASH
169 return exit_codes.AVOCADO_ALL_OK
170
171 def handle_output_files_command(self, config):
172 """Called when 'avocado jobs get-output-files' command is executed."""
173
174 job_id = config.get('jobs.get.output_files.job_id')
175 destination = config.get('jobs.get.output_files.destination')
176
177 results_dir = get_job_results_dir(job_id)
178 results_file = os.path.join(results_dir, 'results.json')
179 config_file = os.path.join(results_dir, 'jobdata/args.json')
180
181 try:
182 config_data = self._get_data_from_file(config_file)
183 results_data = self._get_data_from_file(results_file)
184 except FileNotFoundError as ex:
185 LOG_UI.error("Could not get job information: %s", ex)
186 return exit_codes.AVOCADO_GENERIC_CRASH
187
188 spawners = {'process': ProcessSpawner,
189 'podman': PodmanSpawner}
190
191 spawner_name = config_data.get('nrun.spawner')
192 spawner = spawners.get(spawner_name)
193
194 if spawner is None:
195 msg = ("Could not find the spawner for job %s. This command is "
196 "experimental and only supported when job executed with "
197 "the Spawner architecture.")
198 LOG_UI.error(msg, job_id)
199 return exit_codes.AVOCADO_GENERIC_CRASH
200
201 return self._download_tests(results_data.get('tests'),
202 destination,
203 job_id,
204 spawner)
205
206 def handle_show_command(self, config):
207 """Called when 'avocado jobs show' command is executed."""
208
209 job_id = config.get('jobs.show.job_id')
210 results_dir = get_job_results_dir(job_id)
211 if results_dir is None:
212 LOG_UI.error("Error: Job %s not found", job_id)
213 return exit_codes.AVOCADO_GENERIC_CRASH
214
215 results_file = os.path.join(results_dir, 'results.json')
216 config_file = os.path.join(results_dir, 'jobdata/args.json')
217 try:
218 results_data = self._get_data_from_file(results_file)
219 except FileNotFoundError as ex:
220 # Results data are important and should exit if not found
221 LOG_UI.error(ex)
222 return exit_codes.AVOCADO_GENERIC_CRASH
223
224 try:
225 config_data = self._get_data_from_file(config_file)
226 except FileNotFoundError:
227 pass
228
229 data = {'JOB ID': job_id,
230 'JOB LOG': results_data.get('debuglog'),
231 'SPAWNER': config_data.get('nrun.spawner', 'unknown')}
232
233 # We could improve this soon with more data and colors
234 self._print_job_details(data)
235 LOG_UI.info("")
236 self._print_job_tests(results_data.get('tests'))
237 results = ('PASS %d | ERROR %d | FAIL %d | SKIP %d |'
238 'WARN %d | INTERRUPT %s | CANCEL %s')
239 results %= (results_data.get('pass', 0),
240 results_data.get('error', 0),
241 results_data.get('failures', 0),
242 results_data.get('skip', 0),
243 results_data.get('warn', 0),
244 results_data.get('interrupt', 0),
245 results_data.get('cancel', 0))
246 self._print_job_details({'RESULTS': results})
247 return exit_codes.AVOCADO_ALL_OK
248
249 def run(self, config):
250 results = {}
251
252 jobs_dir = get_logs_dir()
253 for result in glob(os.path.join(jobs_dir, '*/results.json')):
254 with open(result, 'r') as fp:
255 job = json.load(fp)
256 results[job['job_id']] = result
257
258 subcommand = config.get('jobs_subcommand')
259 if subcommand == 'list':
260 return self.handle_list_command(results)
261 elif subcommand == 'show':
262 return self.handle_show_command(config)
263 elif subcommand == 'get-output-files':
264 return self.handle_output_files_command(config)
265 return exit_codes.AVOCADO_ALL_OK
266
[end of avocado/plugins/jobs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/avocado/plugins/jobs.py b/avocado/plugins/jobs.py
--- a/avocado/plugins/jobs.py
+++ b/avocado/plugins/jobs.py
@@ -77,9 +77,7 @@
Directory must exists before calling this function.
"""
dirname = os.path.dirname(filename)
- if not os.path.isdir(dirname):
- LOG_UI.error("%s does not exist. Exiting...", dirname)
- return exit_codes.AVOCADO_GENERIC_CRASH
+ os.makedirs(dirname, exist_ok=True)
with open(filename, 'ab') as output_file:
output_file.write(stream)
@@ -161,7 +159,9 @@
try:
files_buffers = spawner().stream_output(job_id, test_id)
for filename, stream in files_buffers:
- dest = os.path.join(destination, filename)
+ dest = os.path.join(destination,
+ test_id.replace('/', '_'),
+ filename)
self._save_stream_to_file(stream, dest)
except SpawnerException as ex:
LOG_UI.error("Error: Failed to download: %s. Exiting...", ex)
|
{"golden_diff": "diff --git a/avocado/plugins/jobs.py b/avocado/plugins/jobs.py\n--- a/avocado/plugins/jobs.py\n+++ b/avocado/plugins/jobs.py\n@@ -77,9 +77,7 @@\n Directory must exists before calling this function.\n \"\"\"\n dirname = os.path.dirname(filename)\n- if not os.path.isdir(dirname):\n- LOG_UI.error(\"%s does not exist. Exiting...\", dirname)\n- return exit_codes.AVOCADO_GENERIC_CRASH\n+ os.makedirs(dirname, exist_ok=True)\n \n with open(filename, 'ab') as output_file:\n output_file.write(stream)\n@@ -161,7 +159,9 @@\n try:\n files_buffers = spawner().stream_output(job_id, test_id)\n for filename, stream in files_buffers:\n- dest = os.path.join(destination, filename)\n+ dest = os.path.join(destination,\n+ test_id.replace('/', '_'),\n+ filename)\n self._save_stream_to_file(stream, dest)\n except SpawnerException as ex:\n LOG_UI.error(\"Error: Failed to download: %s. Exiting...\", ex)\n", "issue": "jobs get-output-files should split downloaded files into subdirectories\n> ```\r\n> ll /home/cleber/avocado/job-results/job-2020-06-04T15.17-9bd5b5e\r\n> total 708\r\n> -rw-rw-r--. 1 cleber cleber 41 Jun 4 15:17 id\r\n> drwxrwxr-x. 2 cleber cleber 4096 Jun 4 15:17 jobdata\r\n> -rw-rw-r--. 1 cleber cleber 15818 Jun 4 15:17 job.log\r\n> -rw-rw-r--. 1 cleber cleber 668480 Jun 4 15:17 results.html\r\n> -rw-rw-r--. 1 cleber cleber 1029 Jun 4 15:17 results.json\r\n> -rw-rw-r--. 1 cleber cleber 36 Jun 4 15:17 results.tap\r\n> -rw-rw-r--. 1 cleber cleber 345 Jun 4 15:17 results.xml\r\n> drwxrwxr-x. 5 cleber cleber 4096 Jun 4 15:17 sysinfo\r\n> -rw-rw-r--. 1 cleber cleber 16 Jun 4 15:18 test2_created\r\n> -rw-rw-r--. 1 cleber cleber 16 Jun 4 15:18 test_created\r\n> drwxrwxr-x. 4 cleber cleber 4096 Jun 4 15:17 test-results\r\n> ```\r\n> \r\n> I had the expectation that, `test_created` would to to `test-results/1-_tmp_t.sh/` and `test2_created` would go to `test-results/2-_tmp_t2.sh/`. Is this expected now? Yes, no, in the future?\n", "before_files": [{"content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: Red Hat Inc. 
2020\n# Authors: Beraldo Leal <[email protected]>\n\n\"\"\"\nJobs subcommand\n\"\"\"\nimport json\nimport os\n\nfrom datetime import datetime\nfrom glob import glob\n\nfrom avocado.core import exit_codes\nfrom avocado.core import output\nfrom avocado.core.data_dir import get_job_results_dir, get_logs_dir\nfrom avocado.core.future.settings import settings\nfrom avocado.core.output import LOG_UI\nfrom avocado.core.plugin_interfaces import CLICmd\nfrom avocado.core.spawners.exceptions import SpawnerException\nfrom avocado.core.spawners.podman import PodmanSpawner\nfrom avocado.core.spawners.process import ProcessSpawner\nfrom avocado.utils import astring\n\n\nclass Jobs(CLICmd):\n \"\"\"\n Implements the avocado 'jobs' subcommand\n \"\"\"\n name = 'jobs'\n description = 'Manage Avocado jobs'\n\n def _get_data_from_file(self, filename):\n if not filename or not os.path.isfile(filename):\n raise FileNotFoundError('File not found {}'.format(filename))\n\n with open(filename, 'r') as fp:\n return json.load(fp)\n\n def _print_job_details(self, details):\n for key, value in details.items():\n LOG_UI.info(\"%-12s: %s\", key, value)\n\n def _print_job_tests(self, tests):\n test_matrix = []\n date_fmt = \"%Y/%m/%d %H:%M:%S\"\n for test in tests:\n status = test.get('status')\n decorator = output.TEST_STATUS_DECORATOR_MAPPING.get(status)\n end = datetime.fromtimestamp(test.get('end'))\n test_matrix.append((test.get('id'),\n end.strftime(date_fmt),\n \"%5f\" % float(test.get('time')),\n decorator(status, '')))\n header = (output.TERM_SUPPORT.header_str('Test ID'),\n output.TERM_SUPPORT.header_str('End Time'),\n output.TERM_SUPPORT.header_str('Run Time'),\n output.TERM_SUPPORT.header_str('Status'))\n for line in astring.iter_tabular_output(test_matrix,\n header=header,\n strip=True):\n LOG_UI.debug(line)\n\n def _save_stream_to_file(self, stream, filename):\n \"\"\"Save stream to a file.\n\n Directory must exists before calling this function.\n \"\"\"\n dirname = os.path.dirname(filename)\n if not os.path.isdir(dirname):\n LOG_UI.error(\"%s does not exist. Exiting...\", dirname)\n return exit_codes.AVOCADO_GENERIC_CRASH\n\n with open(filename, 'ab') as output_file:\n output_file.write(stream)\n\n def configure(self, parser):\n \"\"\"\n Add the subparser for the assets action.\n\n :param parser: The Avocado command line application parser\n :type parser: :class:`avocado.core.parser.ArgumentParser`\n \"\"\"\n parser = super(Jobs, self).configure(parser)\n\n subcommands = parser.add_subparsers(dest='jobs_subcommand',\n metavar='sub-command')\n subcommands.required = True\n\n help_msg = 'List all known jobs by Avocado'\n subcommands.add_parser('list', help=help_msg)\n\n help_msg = ('Show details about a specific job. 
When passing a Job '\n 'ID, you can use any Job Reference (job_id, \"latest\", '\n 'or job results path).')\n show_parser = subcommands.add_parser('show', help=help_msg)\n settings.register_option(section='jobs.show',\n key='job_id',\n help_msg='JOB id',\n metavar='JOBID',\n default='latest',\n nargs='?',\n positional_arg=True,\n parser=show_parser)\n help_msg = ('Download output files generated by tests on '\n 'AVOCADO_TEST_OUTPUT_DIR')\n output_files_parser = subcommands.add_parser('get-output-files',\n help=help_msg)\n settings.register_option(section='jobs.get.output_files',\n key='job_id',\n help_msg='JOB id',\n metavar='JOBID',\n default=None,\n positional_arg=True,\n parser=output_files_parser)\n\n settings.register_option(section='jobs.get.output_files',\n key='destination',\n help_msg='Destination path',\n metavar='DESTINATION',\n default=None,\n positional_arg=True,\n parser=output_files_parser)\n\n def handle_list_command(self, jobs_results):\n \"\"\"Called when 'avocado jobs list' command is executed.\"\"\"\n\n for filename in jobs_results.values():\n with open(filename, 'r') as fp:\n job = json.load(fp)\n try:\n started_ts = job['tests'][0]['start']\n started = datetime.fromtimestamp(started_ts)\n except IndexError:\n continue\n LOG_UI.info(\"%-40s %-26s %3s (%s/%s/%s/%s)\",\n job['job_id'],\n str(started),\n job['total'],\n job['pass'],\n job['skip'],\n job['errors'],\n job['failures'])\n\n return exit_codes.AVOCADO_ALL_OK\n\n def _download_tests(self, tests, destination, job_id, spawner):\n for test in tests:\n test_id = test.get('id')\n LOG_UI.info(\"Downloading files for test %s\", test_id)\n try:\n files_buffers = spawner().stream_output(job_id, test_id)\n for filename, stream in files_buffers:\n dest = os.path.join(destination, filename)\n self._save_stream_to_file(stream, dest)\n except SpawnerException as ex:\n LOG_UI.error(\"Error: Failed to download: %s. Exiting...\", ex)\n return exit_codes.AVOCADO_GENERIC_CRASH\n return exit_codes.AVOCADO_ALL_OK\n\n def handle_output_files_command(self, config):\n \"\"\"Called when 'avocado jobs get-output-files' command is executed.\"\"\"\n\n job_id = config.get('jobs.get.output_files.job_id')\n destination = config.get('jobs.get.output_files.destination')\n\n results_dir = get_job_results_dir(job_id)\n results_file = os.path.join(results_dir, 'results.json')\n config_file = os.path.join(results_dir, 'jobdata/args.json')\n\n try:\n config_data = self._get_data_from_file(config_file)\n results_data = self._get_data_from_file(results_file)\n except FileNotFoundError as ex:\n LOG_UI.error(\"Could not get job information: %s\", ex)\n return exit_codes.AVOCADO_GENERIC_CRASH\n\n spawners = {'process': ProcessSpawner,\n 'podman': PodmanSpawner}\n\n spawner_name = config_data.get('nrun.spawner')\n spawner = spawners.get(spawner_name)\n\n if spawner is None:\n msg = (\"Could not find the spawner for job %s. 
This command is \"\n \"experimental and only supported when job executed with \"\n \"the Spawner architecture.\")\n LOG_UI.error(msg, job_id)\n return exit_codes.AVOCADO_GENERIC_CRASH\n\n return self._download_tests(results_data.get('tests'),\n destination,\n job_id,\n spawner)\n\n def handle_show_command(self, config):\n \"\"\"Called when 'avocado jobs show' command is executed.\"\"\"\n\n job_id = config.get('jobs.show.job_id')\n results_dir = get_job_results_dir(job_id)\n if results_dir is None:\n LOG_UI.error(\"Error: Job %s not found\", job_id)\n return exit_codes.AVOCADO_GENERIC_CRASH\n\n results_file = os.path.join(results_dir, 'results.json')\n config_file = os.path.join(results_dir, 'jobdata/args.json')\n try:\n results_data = self._get_data_from_file(results_file)\n except FileNotFoundError as ex:\n # Results data are important and should exit if not found\n LOG_UI.error(ex)\n return exit_codes.AVOCADO_GENERIC_CRASH\n\n try:\n config_data = self._get_data_from_file(config_file)\n except FileNotFoundError:\n pass\n\n data = {'JOB ID': job_id,\n 'JOB LOG': results_data.get('debuglog'),\n 'SPAWNER': config_data.get('nrun.spawner', 'unknown')}\n\n # We could improve this soon with more data and colors\n self._print_job_details(data)\n LOG_UI.info(\"\")\n self._print_job_tests(results_data.get('tests'))\n results = ('PASS %d | ERROR %d | FAIL %d | SKIP %d |'\n 'WARN %d | INTERRUPT %s | CANCEL %s')\n results %= (results_data.get('pass', 0),\n results_data.get('error', 0),\n results_data.get('failures', 0),\n results_data.get('skip', 0),\n results_data.get('warn', 0),\n results_data.get('interrupt', 0),\n results_data.get('cancel', 0))\n self._print_job_details({'RESULTS': results})\n return exit_codes.AVOCADO_ALL_OK\n\n def run(self, config):\n results = {}\n\n jobs_dir = get_logs_dir()\n for result in glob(os.path.join(jobs_dir, '*/results.json')):\n with open(result, 'r') as fp:\n job = json.load(fp)\n results[job['job_id']] = result\n\n subcommand = config.get('jobs_subcommand')\n if subcommand == 'list':\n return self.handle_list_command(results)\n elif subcommand == 'show':\n return self.handle_show_command(config)\n elif subcommand == 'get-output-files':\n return self.handle_output_files_command(config)\n return exit_codes.AVOCADO_ALL_OK\n", "path": "avocado/plugins/jobs.py"}]}
| 3,868 | 247 |