| problem_id (string, 18-22 chars) | source (string, 1 class) | task_type (string, 1 class) | in_source_id (string, 13-58 chars) | prompt (string, 1.1k-25.4k chars) | golden_diff (string, 145-5.13k chars) | verification_info (string, 582-39.1k chars) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_17691
|
rasdani/github-patches
|
git_diff
|
docker__docker-py-867
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No documentation for network api
The following have missing documentation ([readthedocs](http://docker-py.readthedocs.org/)).
- [x] `Client.networks`
- [x] `Client.create_network`
- [x] `Client.remove_network`
- [x] `Client.inspect_network`
- [x] `Client.connect_container_to_network`
- [x] `Client.disconnect_container_from_network`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/api/volume.py`
Content:
```
1 from .. import utils
2
3
4 class VolumeApiMixin(object):
5 @utils.minimum_version('1.21')
6 def volumes(self, filters=None):
7 params = {
8 'filters': utils.convert_filters(filters) if filters else None
9 }
10 url = self._url('/volumes')
11 return self._result(self._get(url, params=params), True)
12
13 @utils.minimum_version('1.21')
14 def create_volume(self, name, driver=None, driver_opts=None):
15 url = self._url('/volumes/create')
16 if driver_opts is not None and not isinstance(driver_opts, dict):
17 raise TypeError('driver_opts must be a dictionary')
18
19 data = {
20 'Name': name,
21 'Driver': driver,
22 'DriverOpts': driver_opts,
23 }
24 return self._result(self._post_json(url, data=data), True)
25
26 @utils.minimum_version('1.21')
27 def inspect_volume(self, name):
28 url = self._url('/volumes/{0}', name)
29 return self._result(self._get(url), True)
30
31 @utils.minimum_version('1.21')
32 def remove_volume(self, name):
33 url = self._url('/volumes/{0}', name)
34 resp = self._delete(url)
35 self._raise_for_status(resp)
36 return True
37
```
Path: `docker/api/network.py`
Content:
```
1 import json
2
3 from ..utils import check_resource, minimum_version
4
5
6 class NetworkApiMixin(object):
7 @minimum_version('1.21')
8 def networks(self, names=None, ids=None):
9 filters = {}
10 if names:
11 filters['name'] = names
12 if ids:
13 filters['id'] = ids
14
15 params = {'filters': json.dumps(filters)}
16
17 url = self._url("/networks")
18 res = self._get(url, params=params)
19 return self._result(res, json=True)
20
21 @minimum_version('1.21')
22 def create_network(self, name, driver=None):
23 data = {
24 'name': name,
25 'driver': driver,
26 }
27 url = self._url("/networks/create")
28 res = self._post_json(url, data=data)
29 return self._result(res, json=True)
30
31 @minimum_version('1.21')
32 def remove_network(self, net_id):
33 url = self._url("/networks/{0}", net_id)
34 res = self._delete(url)
35 self._raise_for_status(res)
36
37 @minimum_version('1.21')
38 def inspect_network(self, net_id):
39 url = self._url("/networks/{0}", net_id)
40 res = self._get(url)
41 return self._result(res, json=True)
42
43 @check_resource
44 @minimum_version('1.21')
45 def connect_container_to_network(self, container, net_id):
46 data = {"container": container}
47 url = self._url("/networks/{0}/connect", net_id)
48 self._post_json(url, data=data)
49
50 @check_resource
51 @minimum_version('1.21')
52 def disconnect_container_from_network(self, container, net_id):
53 data = {"container": container}
54 url = self._url("/networks/{0}/disconnect", net_id)
55 self._post_json(url, data=data)
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docker/api/network.py b/docker/api/network.py
--- a/docker/api/network.py
+++ b/docker/api/network.py
@@ -19,10 +19,14 @@
return self._result(res, json=True)
@minimum_version('1.21')
- def create_network(self, name, driver=None):
+ def create_network(self, name, driver=None, options=None):
+ if options is not None and not isinstance(options, dict):
+ raise TypeError('options must be a dictionary')
+
data = {
'name': name,
'driver': driver,
+ 'options': options
}
url = self._url("/networks/create")
res = self._post_json(url, data=data)
diff --git a/docker/api/volume.py b/docker/api/volume.py
--- a/docker/api/volume.py
+++ b/docker/api/volume.py
@@ -33,4 +33,3 @@
url = self._url('/volumes/{0}', name)
resp = self._delete(url)
self._raise_for_status(resp)
- return True
|
{"golden_diff": "diff --git a/docker/api/network.py b/docker/api/network.py\n--- a/docker/api/network.py\n+++ b/docker/api/network.py\n@@ -19,10 +19,14 @@\n return self._result(res, json=True)\n \n @minimum_version('1.21')\n- def create_network(self, name, driver=None):\n+ def create_network(self, name, driver=None, options=None):\n+ if options is not None and not isinstance(options, dict):\n+ raise TypeError('options must be a dictionary')\n+\n data = {\n 'name': name,\n 'driver': driver,\n+ 'options': options\n }\n url = self._url(\"/networks/create\")\n res = self._post_json(url, data=data)\ndiff --git a/docker/api/volume.py b/docker/api/volume.py\n--- a/docker/api/volume.py\n+++ b/docker/api/volume.py\n@@ -33,4 +33,3 @@\n url = self._url('/volumes/{0}', name)\n resp = self._delete(url)\n self._raise_for_status(resp)\n- return True\n", "issue": "No documentation for network api\nThe following have missing documentation ([readthedocs](http://docker-py.readthedocs.org/)).\n- [x] `Client.networks`\n- [x] `Client.create_network`\n- [x] `Client.remove_network`\n- [x] `Client.inspect_network`\n- [x] `Client.connect_container_to_network`\n- [x] `Client.disconnect_container_from_network`\n\n", "before_files": [{"content": "from .. import utils\n\n\nclass VolumeApiMixin(object):\n @utils.minimum_version('1.21')\n def volumes(self, filters=None):\n params = {\n 'filters': utils.convert_filters(filters) if filters else None\n }\n url = self._url('/volumes')\n return self._result(self._get(url, params=params), True)\n\n @utils.minimum_version('1.21')\n def create_volume(self, name, driver=None, driver_opts=None):\n url = self._url('/volumes/create')\n if driver_opts is not None and not isinstance(driver_opts, dict):\n raise TypeError('driver_opts must be a dictionary')\n\n data = {\n 'Name': name,\n 'Driver': driver,\n 'DriverOpts': driver_opts,\n }\n return self._result(self._post_json(url, data=data), True)\n\n @utils.minimum_version('1.21')\n def inspect_volume(self, name):\n url = self._url('/volumes/{0}', name)\n return self._result(self._get(url), True)\n\n @utils.minimum_version('1.21')\n def remove_volume(self, name):\n url = self._url('/volumes/{0}', name)\n resp = self._delete(url)\n self._raise_for_status(resp)\n return True\n", "path": "docker/api/volume.py"}, {"content": "import json\n\nfrom ..utils import check_resource, minimum_version\n\n\nclass NetworkApiMixin(object):\n @minimum_version('1.21')\n def networks(self, names=None, ids=None):\n filters = {}\n if names:\n filters['name'] = names\n if ids:\n filters['id'] = ids\n\n params = {'filters': json.dumps(filters)}\n\n url = self._url(\"/networks\")\n res = self._get(url, params=params)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def create_network(self, name, driver=None):\n data = {\n 'name': name,\n 'driver': driver,\n }\n url = self._url(\"/networks/create\")\n res = self._post_json(url, data=data)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def remove_network(self, net_id):\n url = self._url(\"/networks/{0}\", net_id)\n res = self._delete(url)\n self._raise_for_status(res)\n\n @minimum_version('1.21')\n def inspect_network(self, net_id):\n url = self._url(\"/networks/{0}\", net_id)\n res = self._get(url)\n return self._result(res, json=True)\n\n @check_resource\n @minimum_version('1.21')\n def connect_container_to_network(self, container, net_id):\n data = {\"container\": container}\n url = self._url(\"/networks/{0}/connect\", net_id)\n self._post_json(url, data=data)\n\n 
@check_resource\n @minimum_version('1.21')\n def disconnect_container_from_network(self, container, net_id):\n data = {\"container\": container}\n url = self._url(\"/networks/{0}/disconnect\", net_id)\n self._post_json(url, data=data)\n", "path": "docker/api/network.py"}], "after_files": [{"content": "from .. import utils\n\n\nclass VolumeApiMixin(object):\n @utils.minimum_version('1.21')\n def volumes(self, filters=None):\n params = {\n 'filters': utils.convert_filters(filters) if filters else None\n }\n url = self._url('/volumes')\n return self._result(self._get(url, params=params), True)\n\n @utils.minimum_version('1.21')\n def create_volume(self, name, driver=None, driver_opts=None):\n url = self._url('/volumes/create')\n if driver_opts is not None and not isinstance(driver_opts, dict):\n raise TypeError('driver_opts must be a dictionary')\n\n data = {\n 'Name': name,\n 'Driver': driver,\n 'DriverOpts': driver_opts,\n }\n return self._result(self._post_json(url, data=data), True)\n\n @utils.minimum_version('1.21')\n def inspect_volume(self, name):\n url = self._url('/volumes/{0}', name)\n return self._result(self._get(url), True)\n\n @utils.minimum_version('1.21')\n def remove_volume(self, name):\n url = self._url('/volumes/{0}', name)\n resp = self._delete(url)\n self._raise_for_status(resp)\n", "path": "docker/api/volume.py"}, {"content": "import json\n\nfrom ..utils import check_resource, minimum_version\n\n\nclass NetworkApiMixin(object):\n @minimum_version('1.21')\n def networks(self, names=None, ids=None):\n filters = {}\n if names:\n filters['name'] = names\n if ids:\n filters['id'] = ids\n\n params = {'filters': json.dumps(filters)}\n\n url = self._url(\"/networks\")\n res = self._get(url, params=params)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def create_network(self, name, driver=None, options=None):\n if options is not None and not isinstance(options, dict):\n raise TypeError('options must be a dictionary')\n\n data = {\n 'name': name,\n 'driver': driver,\n 'options': options\n }\n url = self._url(\"/networks/create\")\n res = self._post_json(url, data=data)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def remove_network(self, net_id):\n url = self._url(\"/networks/{0}\", net_id)\n res = self._delete(url)\n self._raise_for_status(res)\n\n @minimum_version('1.21')\n def inspect_network(self, net_id):\n url = self._url(\"/networks/{0}\", net_id)\n res = self._get(url)\n return self._result(res, json=True)\n\n @check_resource\n @minimum_version('1.21')\n def connect_container_to_network(self, container, net_id):\n data = {\"container\": container}\n url = self._url(\"/networks/{0}/connect\", net_id)\n self._post_json(url, data=data)\n\n @check_resource\n @minimum_version('1.21')\n def disconnect_container_from_network(self, container, net_id):\n data = {\"container\": container}\n url = self._url(\"/networks/{0}/disconnect\", net_id)\n self._post_json(url, data=data)\n", "path": "docker/api/network.py"}]}
| 1,231 | 243 |
gh_patches_debug_5886
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-215
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Not stashing changes before installing
Hi,
I'm regularly running into this situation: I have pending changes, I run `git commit -a`, and pre-commit tries to install its hooks:
```
[INFO] Initializing environment for git://github.com/pre-commit/pre-commit-hooks.
An unexpected error has occurred: CalledProcessError: Command: ['git', 'checkout', 'd3db0385825d4c082bc7117c090ac16cb4840f3e']
Return code: 1
Expected return code: 0
Output: (none)
Errors:
error: Your local changes to the following files would be overwritten by checkout:
.pre-commit-config.yaml
.travis.yml
CHANGELOG
README.md
hooks.yaml
pre_commit_hooks/autopep8_wrapper.py
pre_commit_hooks/check_json.py
pre_commit_hooks/check_yaml.py
pre_commit_hooks/debug_statement_hook.py
pre_commit_hooks/end_of_file_fixer.py
pre_commit_hooks/tests_should_end_in_test.py
pre_commit_hooks/trailing_whitespace_fixer.py
pre_commit_hooks/util.py
pylintrc
requirements-dev.txt
setup.py
testing/util.py
tests/autopep8_wrapper_test.py
tests/debug_statement_hook_test.py
tests/end_of_file_fixer_test.py
tests/tests_should_end_in_test_test.py
tests/trailing_whitespace_fixer_test.py
tests/util_test.py
tox.ini
Please, commit your changes or stash them before you can switch branches.
Aborting
Check the log at ~/.pre-commit/pre-commit.log
```
The log contents are
```
An unexpected error has occurred: CalledProcessError: Command: ['git', 'checkout', 'd3db0385825d4c082bc7117c090ac16cb4840f3e']
Return code: 1
Expected return code: 0
Output: (none)
Errors:
error: Your local changes to the following files would be overwritten by checkout:
.pre-commit-config.yaml
.travis.yml
CHANGELOG
README.md
hooks.yaml
pre_commit_hooks/autopep8_wrapper.py
pre_commit_hooks/check_json.py
pre_commit_hooks/check_yaml.py
pre_commit_hooks/debug_statement_hook.py
pre_commit_hooks/end_of_file_fixer.py
pre_commit_hooks/tests_should_end_in_test.py
pre_commit_hooks/trailing_whitespace_fixer.py
pre_commit_hooks/util.py
pylintrc
requirements-dev.txt
setup.py
testing/util.py
tests/autopep8_wrapper_test.py
tests/debug_statement_hook_test.py
tests/end_of_file_fixer_test.py
tests/tests_should_end_in_test_test.py
tests/trailing_whitespace_fixer_test.py
tests/util_test.py
tox.ini
Please, commit your changes or stash them before you can switch branches.
Aborting
Traceback (most recent call last):
File "/home/qdm/workspace/web/pre-commit/pre_commit/error_handler.py", line 34, in error_handler
yield
File "/home/qdm/workspace/web/pre-commit/pre_commit/main.py", line 129, in main
return run(runner, args)
File "/home/qdm/workspace/web/pre-commit/pre_commit/commands/run.py", line 165, in run
return _run_hooks(runner, args, write=write, environ=environ)
File "/home/qdm/workspace/web/pre-commit/pre_commit/commands/run.py", line 115, in _run_hooks
for repo in runner.repositories:
File "/usr/lib/python3.4/site-packages/cached_property.py", line 26, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "/home/qdm/workspace/web/pre-commit/pre_commit/runner.py", line 43, in repositories
repository.require_installed()
File "/home/qdm/workspace/web/pre-commit/pre_commit/repository.py", line 64, in require_installed
self.install()
File "/home/qdm/workspace/web/pre-commit/pre_commit/repository.py", line 78, in install
for language_name, _ in self.languages
File "/usr/lib/python3.4/site-packages/cached_property.py", line 26, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "/home/qdm/workspace/web/pre-commit/pre_commit/repository.py", line 41, in languages
for _, hook in self.hooks
File "/usr/lib/python3.4/site-packages/cached_property.py", line 26, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "/home/qdm/workspace/web/pre-commit/pre_commit/repository.py", line 49, in hooks
for hook in self.repo_config['hooks']
File "/home/qdm/workspace/web/pre-commit/pre_commit/repository.py", line 49, in <genexpr>
for hook in self.repo_config['hooks']
File "/usr/lib/python3.4/site-packages/cached_property.py", line 26, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "/home/qdm/workspace/web/pre-commit/pre_commit/manifest.py", line 24, in hooks
return dict((hook['id'], hook) for hook in self.manifest_contents)
File "/usr/lib/python3.4/site-packages/cached_property.py", line 26, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "/home/qdm/workspace/web/pre-commit/pre_commit/manifest.py", line 18, in manifest_contents
self.repo_path_getter.repo_path, C.MANIFEST_FILE,
File "/usr/lib/python3.4/site-packages/cached_property.py", line 26, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "/home/qdm/workspace/web/pre-commit/pre_commit/store.py", line 46, in repo_path
return self._store.clone(self._repo, self._sha)
File "/home/qdm/workspace/web/pre-commit/pre_commit/store.py", line 119, in clone
cmd_output('git', 'checkout', sha)
File "/home/qdm/workspace/web/pre-commit/pre_commit/util.py", line 160, in cmd_output
returncode, cmd, retcode, output=(stdout, stderr),
pre_commit.util.CalledProcessError: Command: ['git', 'checkout', 'd3db0385825d4c082bc7117c090ac16cb4840f3e']
Return code: 1
Expected return code: 0
Output: (none)
Errors:
error: Your local changes to the following files would be overwritten by checkout:
.pre-commit-config.yaml
.travis.yml
CHANGELOG
README.md
hooks.yaml
pre_commit_hooks/autopep8_wrapper.py
pre_commit_hooks/check_json.py
pre_commit_hooks/check_yaml.py
pre_commit_hooks/debug_statement_hook.py
pre_commit_hooks/end_of_file_fixer.py
pre_commit_hooks/tests_should_end_in_test.py
pre_commit_hooks/trailing_whitespace_fixer.py
pre_commit_hooks/util.py
pylintrc
requirements-dev.txt
setup.py
testing/util.py
tests/autopep8_wrapper_test.py
tests/debug_statement_hook_test.py
tests/end_of_file_fixer_test.py
tests/tests_should_end_in_test_test.py
tests/trailing_whitespace_fixer_test.py
tests/util_test.py
tox.ini
Please, commit your changes or stash them before you can switch branches.
Aborting
```
I think this is a regression from a previous version, it was more seamless then.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/store.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import contextlib
4 import io
5 import logging
6 import os
7 import os.path
8 import sqlite3
9 import tempfile
10
11 from cached_property import cached_property
12
13 from pre_commit.prefixed_command_runner import PrefixedCommandRunner
14 from pre_commit.util import clean_path_on_failure
15 from pre_commit.util import cmd_output
16 from pre_commit.util import cwd
17
18
19 logger = logging.getLogger('pre_commit')
20
21
22 def _get_default_directory():
23 """Returns the default directory for the Store. This is intentionally
24 underscored to indicate that `Store.get_default_directory` is the intended
25 way to get this information. This is also done so
26 `Store.get_default_directory` can be mocked in tests and
27 `_get_default_directory` can be tested.
28 """
29 return os.environ.get(
30 'PRE_COMMIT_HOME',
31 os.path.join(os.path.expanduser('~'), '.pre-commit'),
32 )
33
34
35 class Store(object):
36 get_default_directory = staticmethod(_get_default_directory)
37
38 class RepoPathGetter(object):
39 def __init__(self, repo, sha, store):
40 self._repo = repo
41 self._sha = sha
42 self._store = store
43
44 @cached_property
45 def repo_path(self):
46 return self._store.clone(self._repo, self._sha)
47
48 def __init__(self, directory=None):
49 if directory is None:
50 directory = self.get_default_directory()
51
52 self.directory = directory
53 self.__created = False
54
55 def _write_readme(self):
56 with io.open(os.path.join(self.directory, 'README'), 'w') as readme:
57 readme.write(
58 'This directory is maintained by the pre-commit project.\n'
59 'Learn more: https://github.com/pre-commit/pre-commit\n'
60 )
61
62 def _write_sqlite_db(self):
63 # To avoid a race where someone ^Cs between db creation and execution
64 # of the CREATE TABLE statement
65 fd, tmpfile = tempfile.mkstemp(dir=self.directory)
66 # We'll be managing this file ourselves
67 os.close(fd)
68 # sqlite doesn't close its fd with its contextmanager >.<
69 # contextlib.closing fixes this.
70 # See: http://stackoverflow.com/a/28032829/812183
71 with contextlib.closing(sqlite3.connect(tmpfile)) as db:
72 db.executescript(
73 'CREATE TABLE repos ('
74 ' repo CHAR(255) NOT NULL,'
75 ' ref CHAR(255) NOT NULL,'
76 ' path CHAR(255) NOT NULL,'
77 ' PRIMARY KEY (repo, ref)'
78 ');'
79 )
80
81 # Atomic file move
82 os.rename(tmpfile, self.db_path)
83
84 def _create(self):
85 if os.path.exists(self.db_path):
86 return
87 if not os.path.exists(self.directory):
88 os.makedirs(self.directory)
89 self._write_readme()
90 self._write_sqlite_db()
91
92 def require_created(self):
93 """Require the pre-commit file store to be created."""
94 if self.__created:
95 return
96
97 self._create()
98 self.__created = True
99
100 def clone(self, url, sha):
101 """Clone the given url and checkout the specific sha."""
102 self.require_created()
103
104 # Check if we already exist
105 with sqlite3.connect(self.db_path) as db:
106 result = db.execute(
107 'SELECT path FROM repos WHERE repo = ? AND ref = ?',
108 [url, sha],
109 ).fetchone()
110 if result:
111 return result[0]
112
113 logger.info('Initializing environment for {0}.'.format(url))
114
115 dir = tempfile.mkdtemp(prefix='repo', dir=self.directory)
116 with clean_path_on_failure(dir):
117 cmd_output('git', 'clone', '--no-checkout', url, dir)
118 with cwd(dir):
119 cmd_output('git', 'checkout', sha)
120
121 # Update our db with the created repo
122 with sqlite3.connect(self.db_path) as db:
123 db.execute(
124 'INSERT INTO repos (repo, ref, path) VALUES (?, ?, ?)',
125 [url, sha, dir],
126 )
127 return dir
128
129 def get_repo_path_getter(self, repo, sha):
130 return self.RepoPathGetter(repo, sha, self)
131
132 @cached_property
133 def cmd_runner(self):
134 return PrefixedCommandRunner(self.directory)
135
136 @cached_property
137 def db_path(self):
138 return os.path.join(self.directory, 'db.db')
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/store.py b/pre_commit/store.py
--- a/pre_commit/store.py
+++ b/pre_commit/store.py
@@ -116,7 +116,7 @@
with clean_path_on_failure(dir):
cmd_output('git', 'clone', '--no-checkout', url, dir)
with cwd(dir):
- cmd_output('git', 'checkout', sha)
+ cmd_output('git', 'reset', sha, '--hard')
# Update our db with the created repo
with sqlite3.connect(self.db_path) as db:
|
{"golden_diff": "diff --git a/pre_commit/store.py b/pre_commit/store.py\n--- a/pre_commit/store.py\n+++ b/pre_commit/store.py\n@@ -116,7 +116,7 @@\n with clean_path_on_failure(dir):\n cmd_output('git', 'clone', '--no-checkout', url, dir)\n with cwd(dir):\n- cmd_output('git', 'checkout', sha)\n+ cmd_output('git', 'reset', sha, '--hard')\n \n # Update our db with the created repo\n with sqlite3.connect(self.db_path) as db:\n", "issue": "Not stashing changes before installing\nHi,\n\nI'm regularly running into this situation: I have pending changes, I run `git commit -a`, and pre-commit tries to install its hooks:\n\n```\n[INFO] Initializing environment for git://github.com/pre-commit/pre-commit-hooks.\nAn unexpected error has occurred: CalledProcessError: Command: ['git', 'checkout', 'd3db0385825d4c082bc7117c090ac16cb4840f3e']\nReturn code: 1\nExpected return code: 0\nOutput: (none)\nErrors: \n error: Your local changes to the following files would be overwritten by checkout:\n .pre-commit-config.yaml\n .travis.yml\n CHANGELOG\n README.md\n hooks.yaml\n pre_commit_hooks/autopep8_wrapper.py\n pre_commit_hooks/check_json.py\n pre_commit_hooks/check_yaml.py\n pre_commit_hooks/debug_statement_hook.py\n pre_commit_hooks/end_of_file_fixer.py\n pre_commit_hooks/tests_should_end_in_test.py\n pre_commit_hooks/trailing_whitespace_fixer.py\n pre_commit_hooks/util.py\n pylintrc\n requirements-dev.txt\n setup.py\n testing/util.py\n tests/autopep8_wrapper_test.py\n tests/debug_statement_hook_test.py\n tests/end_of_file_fixer_test.py\n tests/tests_should_end_in_test_test.py\n tests/trailing_whitespace_fixer_test.py\n tests/util_test.py\n tox.ini\n Please, commit your changes or stash them before you can switch branches.\n Aborting\n\n\nCheck the log at ~/.pre-commit/pre-commit.log\n```\n\nThe log contents are \n\n```\nAn unexpected error has occurred: CalledProcessError: Command: ['git', 'checkout', 'd3db0385825d4c082bc7117c090ac16cb4840f3e']\nReturn code: 1\nExpected return code: 0\nOutput: (none)\nErrors: \n error: Your local changes to the following files would be overwritten by checkout:\n .pre-commit-config.yaml\n .travis.yml\n CHANGELOG\n README.md\n hooks.yaml\n pre_commit_hooks/autopep8_wrapper.py\n pre_commit_hooks/check_json.py\n pre_commit_hooks/check_yaml.py\n pre_commit_hooks/debug_statement_hook.py\n pre_commit_hooks/end_of_file_fixer.py\n pre_commit_hooks/tests_should_end_in_test.py\n pre_commit_hooks/trailing_whitespace_fixer.py\n pre_commit_hooks/util.py\n pylintrc\n requirements-dev.txt\n setup.py\n testing/util.py\n tests/autopep8_wrapper_test.py\n tests/debug_statement_hook_test.py\n tests/end_of_file_fixer_test.py\n tests/tests_should_end_in_test_test.py\n tests/trailing_whitespace_fixer_test.py\n tests/util_test.py\n tox.ini\n Please, commit your changes or stash them before you can switch branches.\n Aborting\n\n\nTraceback (most recent call last):\n File \"/home/qdm/workspace/web/pre-commit/pre_commit/error_handler.py\", line 34, in error_handler\n yield\n File \"/home/qdm/workspace/web/pre-commit/pre_commit/main.py\", line 129, in main\n return run(runner, args)\n File \"/home/qdm/workspace/web/pre-commit/pre_commit/commands/run.py\", line 165, in run\n return _run_hooks(runner, args, write=write, environ=environ)\n File \"/home/qdm/workspace/web/pre-commit/pre_commit/commands/run.py\", line 115, in _run_hooks\n for repo in runner.repositories:\n File \"/usr/lib/python3.4/site-packages/cached_property.py\", line 26, in __get__\n value = obj.__dict__[self.func.__name__] = 
self.func(obj)\n File \"/home/qdm/workspace/web/pre-commit/pre_commit/runner.py\", line 43, in repositories\n repository.require_installed()\n File \"/home/qdm/workspace/web/pre-commit/pre_commit/repository.py\", line 64, in require_installed\n self.install()\n File \"/home/qdm/workspace/web/pre-commit/pre_commit/repository.py\", line 78, in install\n for language_name, _ in self.languages\n File \"/usr/lib/python3.4/site-packages/cached_property.py\", line 26, in __get__\n value = obj.__dict__[self.func.__name__] = self.func(obj)\n File \"/home/qdm/workspace/web/pre-commit/pre_commit/repository.py\", line 41, in languages\n for _, hook in self.hooks\n File \"/usr/lib/python3.4/site-packages/cached_property.py\", line 26, in __get__\n value = obj.__dict__[self.func.__name__] = self.func(obj)\n File \"/home/qdm/workspace/web/pre-commit/pre_commit/repository.py\", line 49, in hooks\n for hook in self.repo_config['hooks']\n File \"/home/qdm/workspace/web/pre-commit/pre_commit/repository.py\", line 49, in <genexpr>\n for hook in self.repo_config['hooks']\n File \"/usr/lib/python3.4/site-packages/cached_property.py\", line 26, in __get__\n value = obj.__dict__[self.func.__name__] = self.func(obj)\n File \"/home/qdm/workspace/web/pre-commit/pre_commit/manifest.py\", line 24, in hooks\n return dict((hook['id'], hook) for hook in self.manifest_contents)\n File \"/usr/lib/python3.4/site-packages/cached_property.py\", line 26, in __get__\n value = obj.__dict__[self.func.__name__] = self.func(obj)\n File \"/home/qdm/workspace/web/pre-commit/pre_commit/manifest.py\", line 18, in manifest_contents\n self.repo_path_getter.repo_path, C.MANIFEST_FILE,\n File \"/usr/lib/python3.4/site-packages/cached_property.py\", line 26, in __get__\n value = obj.__dict__[self.func.__name__] = self.func(obj)\n File \"/home/qdm/workspace/web/pre-commit/pre_commit/store.py\", line 46, in repo_path\n return self._store.clone(self._repo, self._sha)\n File \"/home/qdm/workspace/web/pre-commit/pre_commit/store.py\", line 119, in clone\n cmd_output('git', 'checkout', sha)\n File \"/home/qdm/workspace/web/pre-commit/pre_commit/util.py\", line 160, in cmd_output\n returncode, cmd, retcode, output=(stdout, stderr),\npre_commit.util.CalledProcessError: Command: ['git', 'checkout', 'd3db0385825d4c082bc7117c090ac16cb4840f3e']\nReturn code: 1\nExpected return code: 0\nOutput: (none)\nErrors: \n error: Your local changes to the following files would be overwritten by checkout:\n .pre-commit-config.yaml\n .travis.yml\n CHANGELOG\n README.md\n hooks.yaml\n pre_commit_hooks/autopep8_wrapper.py\n pre_commit_hooks/check_json.py\n pre_commit_hooks/check_yaml.py\n pre_commit_hooks/debug_statement_hook.py\n pre_commit_hooks/end_of_file_fixer.py\n pre_commit_hooks/tests_should_end_in_test.py\n pre_commit_hooks/trailing_whitespace_fixer.py\n pre_commit_hooks/util.py\n pylintrc\n requirements-dev.txt\n setup.py\n testing/util.py\n tests/autopep8_wrapper_test.py\n tests/debug_statement_hook_test.py\n tests/end_of_file_fixer_test.py\n tests/tests_should_end_in_test_test.py\n tests/trailing_whitespace_fixer_test.py\n tests/util_test.py\n tox.ini\n Please, commit your changes or stash them before you can switch branches.\n Aborting\n```\n\nI think this is a regression from a previous version, it was more seamless then.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport os\nimport os.path\nimport sqlite3\nimport tempfile\n\nfrom cached_property import 
cached_property\n\nfrom pre_commit.prefixed_command_runner import PrefixedCommandRunner\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cwd\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _get_default_directory():\n \"\"\"Returns the default directory for the Store. This is intentionally\n underscored to indicate that `Store.get_default_directory` is the intended\n way to get this information. This is also done so\n `Store.get_default_directory` can be mocked in tests and\n `_get_default_directory` can be tested.\n \"\"\"\n return os.environ.get(\n 'PRE_COMMIT_HOME',\n os.path.join(os.path.expanduser('~'), '.pre-commit'),\n )\n\n\nclass Store(object):\n get_default_directory = staticmethod(_get_default_directory)\n\n class RepoPathGetter(object):\n def __init__(self, repo, sha, store):\n self._repo = repo\n self._sha = sha\n self._store = store\n\n @cached_property\n def repo_path(self):\n return self._store.clone(self._repo, self._sha)\n\n def __init__(self, directory=None):\n if directory is None:\n directory = self.get_default_directory()\n\n self.directory = directory\n self.__created = False\n\n def _write_readme(self):\n with io.open(os.path.join(self.directory, 'README'), 'w') as readme:\n readme.write(\n 'This directory is maintained by the pre-commit project.\\n'\n 'Learn more: https://github.com/pre-commit/pre-commit\\n'\n )\n\n def _write_sqlite_db(self):\n # To avoid a race where someone ^Cs between db creation and execution\n # of the CREATE TABLE statement\n fd, tmpfile = tempfile.mkstemp(dir=self.directory)\n # We'll be managing this file ourselves\n os.close(fd)\n # sqlite doesn't close its fd with its contextmanager >.<\n # contextlib.closing fixes this.\n # See: http://stackoverflow.com/a/28032829/812183\n with contextlib.closing(sqlite3.connect(tmpfile)) as db:\n db.executescript(\n 'CREATE TABLE repos ('\n ' repo CHAR(255) NOT NULL,'\n ' ref CHAR(255) NOT NULL,'\n ' path CHAR(255) NOT NULL,'\n ' PRIMARY KEY (repo, ref)'\n ');'\n )\n\n # Atomic file move\n os.rename(tmpfile, self.db_path)\n\n def _create(self):\n if os.path.exists(self.db_path):\n return\n if not os.path.exists(self.directory):\n os.makedirs(self.directory)\n self._write_readme()\n self._write_sqlite_db()\n\n def require_created(self):\n \"\"\"Require the pre-commit file store to be created.\"\"\"\n if self.__created:\n return\n\n self._create()\n self.__created = True\n\n def clone(self, url, sha):\n \"\"\"Clone the given url and checkout the specific sha.\"\"\"\n self.require_created()\n\n # Check if we already exist\n with sqlite3.connect(self.db_path) as db:\n result = db.execute(\n 'SELECT path FROM repos WHERE repo = ? 
AND ref = ?',\n [url, sha],\n ).fetchone()\n if result:\n return result[0]\n\n logger.info('Initializing environment for {0}.'.format(url))\n\n dir = tempfile.mkdtemp(prefix='repo', dir=self.directory)\n with clean_path_on_failure(dir):\n cmd_output('git', 'clone', '--no-checkout', url, dir)\n with cwd(dir):\n cmd_output('git', 'checkout', sha)\n\n # Update our db with the created repo\n with sqlite3.connect(self.db_path) as db:\n db.execute(\n 'INSERT INTO repos (repo, ref, path) VALUES (?, ?, ?)',\n [url, sha, dir],\n )\n return dir\n\n def get_repo_path_getter(self, repo, sha):\n return self.RepoPathGetter(repo, sha, self)\n\n @cached_property\n def cmd_runner(self):\n return PrefixedCommandRunner(self.directory)\n\n @cached_property\n def db_path(self):\n return os.path.join(self.directory, 'db.db')\n", "path": "pre_commit/store.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport os\nimport os.path\nimport sqlite3\nimport tempfile\n\nfrom cached_property import cached_property\n\nfrom pre_commit.prefixed_command_runner import PrefixedCommandRunner\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cwd\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _get_default_directory():\n \"\"\"Returns the default directory for the Store. This is intentionally\n underscored to indicate that `Store.get_default_directory` is the intended\n way to get this information. This is also done so\n `Store.get_default_directory` can be mocked in tests and\n `_get_default_directory` can be tested.\n \"\"\"\n return os.environ.get(\n 'PRE_COMMIT_HOME',\n os.path.join(os.path.expanduser('~'), '.pre-commit'),\n )\n\n\nclass Store(object):\n get_default_directory = staticmethod(_get_default_directory)\n\n class RepoPathGetter(object):\n def __init__(self, repo, sha, store):\n self._repo = repo\n self._sha = sha\n self._store = store\n\n @cached_property\n def repo_path(self):\n return self._store.clone(self._repo, self._sha)\n\n def __init__(self, directory=None):\n if directory is None:\n directory = self.get_default_directory()\n\n self.directory = directory\n self.__created = False\n\n def _write_readme(self):\n with io.open(os.path.join(self.directory, 'README'), 'w') as readme:\n readme.write(\n 'This directory is maintained by the pre-commit project.\\n'\n 'Learn more: https://github.com/pre-commit/pre-commit\\n'\n )\n\n def _write_sqlite_db(self):\n # To avoid a race where someone ^Cs between db creation and execution\n # of the CREATE TABLE statement\n fd, tmpfile = tempfile.mkstemp(dir=self.directory)\n # We'll be managing this file ourselves\n os.close(fd)\n # sqlite doesn't close its fd with its contextmanager >.<\n # contextlib.closing fixes this.\n # See: http://stackoverflow.com/a/28032829/812183\n with contextlib.closing(sqlite3.connect(tmpfile)) as db:\n db.executescript(\n 'CREATE TABLE repos ('\n ' repo CHAR(255) NOT NULL,'\n ' ref CHAR(255) NOT NULL,'\n ' path CHAR(255) NOT NULL,'\n ' PRIMARY KEY (repo, ref)'\n ');'\n )\n\n # Atomic file move\n os.rename(tmpfile, self.db_path)\n\n def _create(self):\n if os.path.exists(self.db_path):\n return\n if not os.path.exists(self.directory):\n os.makedirs(self.directory)\n self._write_readme()\n self._write_sqlite_db()\n\n def require_created(self):\n \"\"\"Require the pre-commit file store to be created.\"\"\"\n if self.__created:\n return\n\n self._create()\n self.__created = True\n\n def 
clone(self, url, sha):\n \"\"\"Clone the given url and checkout the specific sha.\"\"\"\n self.require_created()\n\n # Check if we already exist\n with sqlite3.connect(self.db_path) as db:\n result = db.execute(\n 'SELECT path FROM repos WHERE repo = ? AND ref = ?',\n [url, sha],\n ).fetchone()\n if result:\n return result[0]\n\n logger.info('Initializing environment for {0}.'.format(url))\n\n dir = tempfile.mkdtemp(prefix='repo', dir=self.directory)\n with clean_path_on_failure(dir):\n cmd_output('git', 'clone', '--no-checkout', url, dir)\n with cwd(dir):\n cmd_output('git', 'reset', sha, '--hard')\n\n # Update our db with the created repo\n with sqlite3.connect(self.db_path) as db:\n db.execute(\n 'INSERT INTO repos (repo, ref, path) VALUES (?, ?, ?)',\n [url, sha, dir],\n )\n return dir\n\n def get_repo_path_getter(self, repo, sha):\n return self.RepoPathGetter(repo, sha, self)\n\n @cached_property\n def cmd_runner(self):\n return PrefixedCommandRunner(self.directory)\n\n @cached_property\n def db_path(self):\n return os.path.join(self.directory, 'db.db')\n", "path": "pre_commit/store.py"}]}
| 3,308 | 123 |
gh_patches_debug_35739
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-center-index-12771
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] wayland/all: incompatible with latest (1.52.0) Conan version
### Package and Environment Details
* Package Name/Version: **wayland/1.20.0**
* Conan version: **conan 1.52.0**
### Conan profile
_No response_
### Steps to reproduce
conan export recipes/wayland/all wayland/1.20.0@
### Logs
<details><summary>Click to expand log</summary>
```
File "recipes/wayland/all/conanfile.py", line 5, in <module>
from conan.tools.gnu.pkgconfigdeps.pc_files_creator import get_pc_files_and_content
ModuleNotFoundError: No module named 'conan.tools.gnu.pkgconfigdeps.pc_files_creator'; 'conan.tools.gnu.pkgconfigdeps' is not a package
```
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/wayland/all/conanfile.py`
Content:
```
1 from conan import ConanFile
2 from conan.errors import ConanInvalidConfiguration
3 from conan.tools.build import cross_building
4 from conan.tools.files import copy, get, mkdir, replace_in_file, rmdir, save
5 from conan.tools.gnu.pkgconfigdeps.pc_files_creator import get_pc_files_and_content
6 from conan.tools.layout import basic_layout
7 from conan.tools.meson import Meson, MesonToolchain
8 from conan.tools.scm import Version
9 import os
10
11 required_conan_version = ">=1.50.0"
12
13
14 class WaylandConan(ConanFile):
15 name = "wayland"
16 description = (
17 "Wayland is a project to define a protocol for a compositor to talk to "
18 "its clients as well as a library implementation of the protocol"
19 )
20 topics = ("protocol", "compositor", "display")
21 url = "https://github.com/conan-io/conan-center-index"
22 homepage = "https://wayland.freedesktop.org"
23 license = "MIT"
24
25 settings = "os", "arch", "compiler", "build_type"
26 options = {
27 "shared": [True, False],
28 "fPIC": [True, False],
29 "enable_libraries": [True, False],
30 "enable_dtd_validation": [True, False],
31 }
32 default_options = {
33 "shared": False,
34 "fPIC": True,
35 "enable_libraries": True,
36 "enable_dtd_validation": True,
37 }
38
39 generators = "PkgConfigDeps", "VirtualBuildEnv", "VirtualRunEnv"
40
41 def configure(self):
42 if self.options.shared:
43 del self.options.fPIC
44 try:
45 del self.settings.compiler.libcxx
46 except Exception:
47 pass
48 try:
49 del self.settings.compiler.cppstd
50 except Exception:
51 pass
52
53 def requirements(self):
54 if self.options.enable_libraries:
55 self.requires("libffi/3.4.2")
56 if self.options.enable_dtd_validation:
57 self.requires("libxml2/2.9.14")
58 self.requires("expat/2.4.8")
59
60 def validate(self):
61 if self.info.settings.os != "Linux":
62 raise ConanInvalidConfiguration("Wayland can be built on Linux only")
63
64 def build_requirements(self):
65 self.tool_requires("meson/0.63.1")
66 self.tool_requires("pkgconf/1.7.4")
67 if cross_building(self):
68 self.tool_requires(self.ref)
69
70 def layout(self):
71 basic_layout(self, src_folder="src")
72
73 def source(self):
74 get(self, **self.conan_data["sources"][self.version], strip_root=True)
75
76 def generate(self):
77 tc = MesonToolchain(self)
78 tc.project_options["libdir"] = "lib"
79 tc.project_options["datadir"] = "res"
80 tc.project_options["libraries"] = self.options.enable_libraries
81 tc.project_options["dtd_validation"] = self.options.enable_dtd_validation
82 tc.project_options["documentation"] = False
83 if Version(self.version) >= "1.18.91":
84 tc.project_options["scanner"] = True
85
86 # Generate PC files for the tool_requires wayland package to ensure wayland-scanner is found for build machine.
87 if cross_building(self):
88 native_generators_folder = os.path.join(self.generators_folder, "native")
89 mkdir(self, native_generators_folder)
90 for target in ["wayland", "expat", "libxml2", "libiconv"]:
91 for pc_name, pc_content in get_pc_files_and_content(self, self.dependencies.build[target]).items():
92 save(self, os.path.join(native_generators_folder, pc_name), pc_content)
93 tc.project_options["build.pkg_config_path"] = native_generators_folder
94 tc.generate()
95
96 def _patch_sources(self):
97 replace_in_file(self, os.path.join(self.source_folder, "meson.build"),
98 "subdir('tests')", "#subdir('tests')")
99
100 def build(self):
101 self._patch_sources()
102 meson = Meson(self)
103 meson.configure()
104 meson.build()
105
106 def package(self):
107 copy(self, "COPYING", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
108 meson = Meson(self)
109 meson.install()
110 pkg_config_dir = os.path.join(self.package_folder, "lib", "pkgconfig")
111 rmdir(self, pkg_config_dir)
112
113 def package_info(self):
114 self.cpp_info.components["wayland-scanner"].set_property("pkg_config_name", "wayland-scanner")
115 self.cpp_info.components["wayland-scanner"].names["pkg_config"] = "wayland-scanner"
116 self.cpp_info.components["wayland-scanner"].resdirs = ["res"]
117
118 self.cpp_info.components["wayland-scanner"].includedirs = []
119 self.cpp_info.components["wayland-scanner"].libdirs = []
120 self.cpp_info.components["wayland-scanner"].set_property("component_version", self.version)
121
122 self.cpp_info.components["wayland-scanner"].requires = ["expat::expat"]
123 if self.options.enable_dtd_validation:
124 self.cpp_info.components["wayland-scanner"].requires.append("libxml2::libxml2")
125 pkgconfig_variables = {
126 'datarootdir': '${prefix}/res',
127 'pkgdatadir': '${datarootdir}/wayland',
128 'bindir': '${prefix}/bin',
129 'wayland_scanner': '${bindir}/wayland-scanner',
130 }
131 self.cpp_info.components["wayland-scanner"].set_property(
132 "pkg_config_custom_content",
133 "\n".join(f"{key}={value}" for key,value in pkgconfig_variables.items()))
134
135 bindir = os.path.join(self.package_folder, "bin")
136 self.buildenv_info.prepend_path("PATH", bindir)
137 self.runenv_info.prepend_path("PATH", bindir)
138 # TODO: Remove in Conan 2.0 where Environment class will be required.
139 self.output.info("Appending PATH environment variable: {}".format(bindir))
140 self.env_info.PATH.append(bindir)
141
142 if self.options.enable_libraries:
143 self.cpp_info.components["wayland-server"].libs = ["wayland-server"]
144 self.cpp_info.components["wayland-server"].set_property("pkg_config_name", "wayland-server")
145 self.cpp_info.components["wayland-server"].names["pkg_config"] = "wayland-server"
146 self.cpp_info.components["wayland-server"].requires = ["libffi::libffi"]
147 self.cpp_info.components["wayland-server"].system_libs = ["pthread", "m"]
148 self.cpp_info.components["wayland-server"].resdirs = ["res"]
149 if self.version >= Version("1.21.0") and self.settings.os == "Linux":
150 self.cpp_info.components["wayland-server"].system_libs += ["rt"]
151 self.cpp_info.components["wayland-server"].set_property("component_version", self.version)
152
153 pkgconfig_variables = {
154 'datarootdir': '${prefix}/res',
155 'pkgdatadir': '${datarootdir}/wayland',
156 }
157 self.cpp_info.components["wayland-server"].set_property(
158 "pkg_config_custom_content",
159 "\n".join(f"{key}={value}" for key, value in pkgconfig_variables.items()))
160
161 self.cpp_info.components["wayland-client"].libs = ["wayland-client"]
162 self.cpp_info.components["wayland-client"].set_property("pkg_config_name", "wayland-client")
163 self.cpp_info.components["wayland-client"].names["pkg_config"] = "wayland-client"
164 self.cpp_info.components["wayland-client"].requires = ["libffi::libffi"]
165 self.cpp_info.components["wayland-client"].system_libs = ["pthread", "m"]
166 self.cpp_info.components["wayland-client"].resdirs = ["res"]
167 if self.version >= Version("1.21.0") and self.settings.os == "Linux":
168 self.cpp_info.components["wayland-client"].system_libs += ["rt"]
169 self.cpp_info.components["wayland-client"].set_property("component_version", self.version)
170
171 pkgconfig_variables = {
172 'datarootdir': '${prefix}/res',
173 'pkgdatadir': '${datarootdir}/wayland',
174 }
175 self.cpp_info.components["wayland-client"].set_property(
176 "pkg_config_custom_content",
177 "\n".join(f"{key}={value}" for key, value in pkgconfig_variables.items()))
178
179 self.cpp_info.components["wayland-cursor"].libs = ["wayland-cursor"]
180 self.cpp_info.components["wayland-cursor"].set_property("pkg_config_name", "wayland-cursor")
181 self.cpp_info.components["wayland-cursor"].names["pkg_config"] = "wayland-cursor"
182 self.cpp_info.components["wayland-cursor"].requires = ["wayland-client"]
183 self.cpp_info.components["wayland-cursor"].set_property("component_version", self.version)
184
185 self.cpp_info.components["wayland-egl"].libs = ["wayland-egl"]
186 self.cpp_info.components["wayland-egl"].set_property("pkg_config_name", "wayland-egl")
187 self.cpp_info.components["wayland-egl"].names["pkg_config"] = "wayland-egl"
188 self.cpp_info.components["wayland-egl"].requires = ["wayland-client"]
189 self.cpp_info.components["wayland-egl"].set_property("component_version", "18.1.0")
190
191 self.cpp_info.components["wayland-egl-backend"].names["pkg_config"] = "wayland-egl-backend"
192 self.cpp_info.components["wayland-egl-backend"].set_property("pkg_config_name", "wayland-egl-backend")
193 self.cpp_info.components["wayland-egl-backend"].set_property("component_version", "3")
194
195 bindir = os.path.join(self.package_folder, "bin")
196 self.output.info("Appending PATH environment variable: {}".format(bindir))
197 self.env_info.PATH.append(bindir)
198
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/recipes/wayland/all/conanfile.py b/recipes/wayland/all/conanfile.py
--- a/recipes/wayland/all/conanfile.py
+++ b/recipes/wayland/all/conanfile.py
@@ -1,8 +1,7 @@
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.build import cross_building
-from conan.tools.files import copy, get, mkdir, replace_in_file, rmdir, save
-from conan.tools.gnu.pkgconfigdeps.pc_files_creator import get_pc_files_and_content
+from conan.tools.files import copy, get, replace_in_file, rmdir
from conan.tools.layout import basic_layout
from conan.tools.meson import Meson, MesonToolchain
from conan.tools.scm import Version
@@ -82,21 +81,20 @@
tc.project_options["documentation"] = False
if Version(self.version) >= "1.18.91":
tc.project_options["scanner"] = True
-
- # Generate PC files for the tool_requires wayland package to ensure wayland-scanner is found for build machine.
- if cross_building(self):
- native_generators_folder = os.path.join(self.generators_folder, "native")
- mkdir(self, native_generators_folder)
- for target in ["wayland", "expat", "libxml2", "libiconv"]:
- for pc_name, pc_content in get_pc_files_and_content(self, self.dependencies.build[target]).items():
- save(self, os.path.join(native_generators_folder, pc_name), pc_content)
- tc.project_options["build.pkg_config_path"] = native_generators_folder
tc.generate()
def _patch_sources(self):
replace_in_file(self, os.path.join(self.source_folder, "meson.build"),
"subdir('tests')", "#subdir('tests')")
+ if cross_building(self):
+ replace_in_file(self, f"{self.source_folder}/src/meson.build",
+ "scanner_dep = dependency('wayland-scanner', native: true, version: meson.project_version())",
+ "# scanner_dep = dependency('wayland-scanner', native: true, version: meson.project_version())")
+ replace_in_file(self, f"{self.source_folder}/src/meson.build",
+ "wayland_scanner_for_build = find_program(scanner_dep.get_variable(pkgconfig: 'wayland_scanner'))",
+ "wayland_scanner_for_build = find_program('wayland-scanner')")
+
def build(self):
self._patch_sources()
meson = Meson(self)
|
{"golden_diff": "diff --git a/recipes/wayland/all/conanfile.py b/recipes/wayland/all/conanfile.py\n--- a/recipes/wayland/all/conanfile.py\n+++ b/recipes/wayland/all/conanfile.py\n@@ -1,8 +1,7 @@\n from conan import ConanFile\n from conan.errors import ConanInvalidConfiguration\n from conan.tools.build import cross_building\n-from conan.tools.files import copy, get, mkdir, replace_in_file, rmdir, save\n-from conan.tools.gnu.pkgconfigdeps.pc_files_creator import get_pc_files_and_content\n+from conan.tools.files import copy, get, replace_in_file, rmdir\n from conan.tools.layout import basic_layout\n from conan.tools.meson import Meson, MesonToolchain\n from conan.tools.scm import Version\n@@ -82,21 +81,20 @@\n tc.project_options[\"documentation\"] = False\n if Version(self.version) >= \"1.18.91\":\n tc.project_options[\"scanner\"] = True\n-\n- # Generate PC files for the tool_requires wayland package to ensure wayland-scanner is found for build machine.\n- if cross_building(self):\n- native_generators_folder = os.path.join(self.generators_folder, \"native\")\n- mkdir(self, native_generators_folder)\n- for target in [\"wayland\", \"expat\", \"libxml2\", \"libiconv\"]:\n- for pc_name, pc_content in get_pc_files_and_content(self, self.dependencies.build[target]).items():\n- save(self, os.path.join(native_generators_folder, pc_name), pc_content)\n- tc.project_options[\"build.pkg_config_path\"] = native_generators_folder\n tc.generate()\n \n def _patch_sources(self):\n replace_in_file(self, os.path.join(self.source_folder, \"meson.build\"),\n \"subdir('tests')\", \"#subdir('tests')\")\n \n+ if cross_building(self):\n+ replace_in_file(self, f\"{self.source_folder}/src/meson.build\",\n+ \"scanner_dep = dependency('wayland-scanner', native: true, version: meson.project_version())\",\n+ \"# scanner_dep = dependency('wayland-scanner', native: true, version: meson.project_version())\")\n+ replace_in_file(self, f\"{self.source_folder}/src/meson.build\",\n+ \"wayland_scanner_for_build = find_program(scanner_dep.get_variable(pkgconfig: 'wayland_scanner'))\",\n+ \"wayland_scanner_for_build = find_program('wayland-scanner')\")\n+\n def build(self):\n self._patch_sources()\n meson = Meson(self)\n", "issue": "[package] wayland/all: incompatible with latest (1.52.0) Conan version\n### Package and Environment Details\n\n* Package Name/Version: **wayland/1.20.0**\r\n* Conan version: **conan 1.52.0**\n\n### Conan profile\n\n_No response_\n\n### Steps to reproduce\n\nconan export recipes/wayland/all wayland/1.20.0@\n\n### Logs\n\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nFile \"recipes/wayland/all/conanfile.py\", line 5, in <module>\r\n from conan.tools.gnu.pkgconfigdeps.pc_files_creator import get_pc_files_and_content\r\nModuleNotFoundError: No module named 'conan.tools.gnu.pkgconfigdeps.pc_files_creator'; 'conan.tools.gnu.pkgconfigdeps' is not a package\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.build import cross_building\nfrom conan.tools.files import copy, get, mkdir, replace_in_file, rmdir, save\nfrom conan.tools.gnu.pkgconfigdeps.pc_files_creator import get_pc_files_and_content\nfrom conan.tools.layout import basic_layout\nfrom conan.tools.meson import Meson, MesonToolchain\nfrom conan.tools.scm import Version\nimport os\n\nrequired_conan_version = \">=1.50.0\"\n\n\nclass WaylandConan(ConanFile):\n name = \"wayland\"\n description = (\n \"Wayland is a project to define a 
protocol for a compositor to talk to \"\n \"its clients as well as a library implementation of the protocol\"\n )\n topics = (\"protocol\", \"compositor\", \"display\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://wayland.freedesktop.org\"\n license = \"MIT\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"enable_libraries\": [True, False],\n \"enable_dtd_validation\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"enable_libraries\": True,\n \"enable_dtd_validation\": True,\n }\n\n generators = \"PkgConfigDeps\", \"VirtualBuildEnv\", \"VirtualRunEnv\"\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n try:\n del self.settings.compiler.libcxx\n except Exception:\n pass\n try:\n del self.settings.compiler.cppstd\n except Exception:\n pass\n\n def requirements(self):\n if self.options.enable_libraries:\n self.requires(\"libffi/3.4.2\")\n if self.options.enable_dtd_validation:\n self.requires(\"libxml2/2.9.14\")\n self.requires(\"expat/2.4.8\")\n\n def validate(self):\n if self.info.settings.os != \"Linux\":\n raise ConanInvalidConfiguration(\"Wayland can be built on Linux only\")\n\n def build_requirements(self):\n self.tool_requires(\"meson/0.63.1\")\n self.tool_requires(\"pkgconf/1.7.4\")\n if cross_building(self):\n self.tool_requires(self.ref)\n\n def layout(self):\n basic_layout(self, src_folder=\"src\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n tc = MesonToolchain(self)\n tc.project_options[\"libdir\"] = \"lib\"\n tc.project_options[\"datadir\"] = \"res\"\n tc.project_options[\"libraries\"] = self.options.enable_libraries\n tc.project_options[\"dtd_validation\"] = self.options.enable_dtd_validation\n tc.project_options[\"documentation\"] = False\n if Version(self.version) >= \"1.18.91\":\n tc.project_options[\"scanner\"] = True\n\n # Generate PC files for the tool_requires wayland package to ensure wayland-scanner is found for build machine.\n if cross_building(self):\n native_generators_folder = os.path.join(self.generators_folder, \"native\")\n mkdir(self, native_generators_folder)\n for target in [\"wayland\", \"expat\", \"libxml2\", \"libiconv\"]:\n for pc_name, pc_content in get_pc_files_and_content(self, self.dependencies.build[target]).items():\n save(self, os.path.join(native_generators_folder, pc_name), pc_content)\n tc.project_options[\"build.pkg_config_path\"] = native_generators_folder\n tc.generate()\n\n def _patch_sources(self):\n replace_in_file(self, os.path.join(self.source_folder, \"meson.build\"),\n \"subdir('tests')\", \"#subdir('tests')\")\n\n def build(self):\n self._patch_sources()\n meson = Meson(self)\n meson.configure()\n meson.build()\n\n def package(self):\n copy(self, \"COPYING\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n meson = Meson(self)\n meson.install()\n pkg_config_dir = os.path.join(self.package_folder, \"lib\", \"pkgconfig\")\n rmdir(self, pkg_config_dir)\n\n def package_info(self):\n self.cpp_info.components[\"wayland-scanner\"].set_property(\"pkg_config_name\", \"wayland-scanner\")\n self.cpp_info.components[\"wayland-scanner\"].names[\"pkg_config\"] = \"wayland-scanner\"\n self.cpp_info.components[\"wayland-scanner\"].resdirs = [\"res\"]\n\n self.cpp_info.components[\"wayland-scanner\"].includedirs = []\n 
self.cpp_info.components[\"wayland-scanner\"].libdirs = []\n self.cpp_info.components[\"wayland-scanner\"].set_property(\"component_version\", self.version)\n\n self.cpp_info.components[\"wayland-scanner\"].requires = [\"expat::expat\"]\n if self.options.enable_dtd_validation:\n self.cpp_info.components[\"wayland-scanner\"].requires.append(\"libxml2::libxml2\")\n pkgconfig_variables = {\n 'datarootdir': '${prefix}/res',\n 'pkgdatadir': '${datarootdir}/wayland',\n 'bindir': '${prefix}/bin',\n 'wayland_scanner': '${bindir}/wayland-scanner',\n }\n self.cpp_info.components[\"wayland-scanner\"].set_property(\n \"pkg_config_custom_content\",\n \"\\n\".join(f\"{key}={value}\" for key,value in pkgconfig_variables.items()))\n\n bindir = os.path.join(self.package_folder, \"bin\")\n self.buildenv_info.prepend_path(\"PATH\", bindir)\n self.runenv_info.prepend_path(\"PATH\", bindir)\n # TODO: Remove in Conan 2.0 where Environment class will be required.\n self.output.info(\"Appending PATH environment variable: {}\".format(bindir))\n self.env_info.PATH.append(bindir)\n\n if self.options.enable_libraries:\n self.cpp_info.components[\"wayland-server\"].libs = [\"wayland-server\"]\n self.cpp_info.components[\"wayland-server\"].set_property(\"pkg_config_name\", \"wayland-server\")\n self.cpp_info.components[\"wayland-server\"].names[\"pkg_config\"] = \"wayland-server\"\n self.cpp_info.components[\"wayland-server\"].requires = [\"libffi::libffi\"]\n self.cpp_info.components[\"wayland-server\"].system_libs = [\"pthread\", \"m\"]\n self.cpp_info.components[\"wayland-server\"].resdirs = [\"res\"]\n if self.version >= Version(\"1.21.0\") and self.settings.os == \"Linux\":\n self.cpp_info.components[\"wayland-server\"].system_libs += [\"rt\"]\n self.cpp_info.components[\"wayland-server\"].set_property(\"component_version\", self.version)\n\n pkgconfig_variables = {\n 'datarootdir': '${prefix}/res',\n 'pkgdatadir': '${datarootdir}/wayland',\n }\n self.cpp_info.components[\"wayland-server\"].set_property(\n \"pkg_config_custom_content\",\n \"\\n\".join(f\"{key}={value}\" for key, value in pkgconfig_variables.items()))\n\n self.cpp_info.components[\"wayland-client\"].libs = [\"wayland-client\"]\n self.cpp_info.components[\"wayland-client\"].set_property(\"pkg_config_name\", \"wayland-client\")\n self.cpp_info.components[\"wayland-client\"].names[\"pkg_config\"] = \"wayland-client\"\n self.cpp_info.components[\"wayland-client\"].requires = [\"libffi::libffi\"]\n self.cpp_info.components[\"wayland-client\"].system_libs = [\"pthread\", \"m\"]\n self.cpp_info.components[\"wayland-client\"].resdirs = [\"res\"]\n if self.version >= Version(\"1.21.0\") and self.settings.os == \"Linux\":\n self.cpp_info.components[\"wayland-client\"].system_libs += [\"rt\"]\n self.cpp_info.components[\"wayland-client\"].set_property(\"component_version\", self.version)\n\n pkgconfig_variables = {\n 'datarootdir': '${prefix}/res',\n 'pkgdatadir': '${datarootdir}/wayland',\n }\n self.cpp_info.components[\"wayland-client\"].set_property(\n \"pkg_config_custom_content\",\n \"\\n\".join(f\"{key}={value}\" for key, value in pkgconfig_variables.items()))\n\n self.cpp_info.components[\"wayland-cursor\"].libs = [\"wayland-cursor\"]\n self.cpp_info.components[\"wayland-cursor\"].set_property(\"pkg_config_name\", \"wayland-cursor\")\n self.cpp_info.components[\"wayland-cursor\"].names[\"pkg_config\"] = \"wayland-cursor\"\n self.cpp_info.components[\"wayland-cursor\"].requires = [\"wayland-client\"]\n 
self.cpp_info.components[\"wayland-cursor\"].set_property(\"component_version\", self.version)\n\n self.cpp_info.components[\"wayland-egl\"].libs = [\"wayland-egl\"]\n self.cpp_info.components[\"wayland-egl\"].set_property(\"pkg_config_name\", \"wayland-egl\")\n self.cpp_info.components[\"wayland-egl\"].names[\"pkg_config\"] = \"wayland-egl\"\n self.cpp_info.components[\"wayland-egl\"].requires = [\"wayland-client\"]\n self.cpp_info.components[\"wayland-egl\"].set_property(\"component_version\", \"18.1.0\")\n\n self.cpp_info.components[\"wayland-egl-backend\"].names[\"pkg_config\"] = \"wayland-egl-backend\"\n self.cpp_info.components[\"wayland-egl-backend\"].set_property(\"pkg_config_name\", \"wayland-egl-backend\")\n self.cpp_info.components[\"wayland-egl-backend\"].set_property(\"component_version\", \"3\")\n\n bindir = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\".format(bindir))\n self.env_info.PATH.append(bindir)\n", "path": "recipes/wayland/all/conanfile.py"}], "after_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.build import cross_building\nfrom conan.tools.files import copy, get, replace_in_file, rmdir\nfrom conan.tools.layout import basic_layout\nfrom conan.tools.meson import Meson, MesonToolchain\nfrom conan.tools.scm import Version\nimport os\n\nrequired_conan_version = \">=1.50.0\"\n\n\nclass WaylandConan(ConanFile):\n name = \"wayland\"\n description = (\n \"Wayland is a project to define a protocol for a compositor to talk to \"\n \"its clients as well as a library implementation of the protocol\"\n )\n topics = (\"protocol\", \"compositor\", \"display\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://wayland.freedesktop.org\"\n license = \"MIT\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"enable_libraries\": [True, False],\n \"enable_dtd_validation\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"enable_libraries\": True,\n \"enable_dtd_validation\": True,\n }\n\n generators = \"PkgConfigDeps\", \"VirtualBuildEnv\", \"VirtualRunEnv\"\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n try:\n del self.settings.compiler.libcxx\n except Exception:\n pass\n try:\n del self.settings.compiler.cppstd\n except Exception:\n pass\n\n def requirements(self):\n if self.options.enable_libraries:\n self.requires(\"libffi/3.4.2\")\n if self.options.enable_dtd_validation:\n self.requires(\"libxml2/2.9.14\")\n self.requires(\"expat/2.4.8\")\n\n def validate(self):\n if self.info.settings.os != \"Linux\":\n raise ConanInvalidConfiguration(\"Wayland can be built on Linux only\")\n\n def build_requirements(self):\n self.tool_requires(\"meson/0.63.1\")\n self.tool_requires(\"pkgconf/1.7.4\")\n if cross_building(self):\n self.tool_requires(self.ref)\n\n def layout(self):\n basic_layout(self, src_folder=\"src\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n tc = MesonToolchain(self)\n tc.project_options[\"libdir\"] = \"lib\"\n tc.project_options[\"datadir\"] = \"res\"\n tc.project_options[\"libraries\"] = self.options.enable_libraries\n tc.project_options[\"dtd_validation\"] = self.options.enable_dtd_validation\n tc.project_options[\"documentation\"] = False\n if Version(self.version) >= \"1.18.91\":\n 
tc.project_options[\"scanner\"] = True\n tc.generate()\n\n def _patch_sources(self):\n replace_in_file(self, os.path.join(self.source_folder, \"meson.build\"),\n \"subdir('tests')\", \"#subdir('tests')\")\n\n if cross_building(self):\n replace_in_file(self, f\"{self.source_folder}/src/meson.build\",\n \"scanner_dep = dependency('wayland-scanner', native: true, version: meson.project_version())\",\n \"# scanner_dep = dependency('wayland-scanner', native: true, version: meson.project_version())\")\n replace_in_file(self, f\"{self.source_folder}/src/meson.build\",\n \"wayland_scanner_for_build = find_program(scanner_dep.get_variable(pkgconfig: 'wayland_scanner'))\",\n \"wayland_scanner_for_build = find_program('wayland-scanner')\")\n\n def build(self):\n self._patch_sources()\n meson = Meson(self)\n meson.configure()\n meson.build()\n\n def package(self):\n copy(self, \"COPYING\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n meson = Meson(self)\n meson.install()\n pkg_config_dir = os.path.join(self.package_folder, \"lib\", \"pkgconfig\")\n rmdir(self, pkg_config_dir)\n\n def package_info(self):\n self.cpp_info.components[\"wayland-scanner\"].set_property(\"pkg_config_name\", \"wayland-scanner\")\n self.cpp_info.components[\"wayland-scanner\"].names[\"pkg_config\"] = \"wayland-scanner\"\n self.cpp_info.components[\"wayland-scanner\"].resdirs = [\"res\"]\n\n self.cpp_info.components[\"wayland-scanner\"].includedirs = []\n self.cpp_info.components[\"wayland-scanner\"].libdirs = []\n self.cpp_info.components[\"wayland-scanner\"].set_property(\"component_version\", self.version)\n\n self.cpp_info.components[\"wayland-scanner\"].requires = [\"expat::expat\"]\n if self.options.enable_dtd_validation:\n self.cpp_info.components[\"wayland-scanner\"].requires.append(\"libxml2::libxml2\")\n pkgconfig_variables = {\n 'datarootdir': '${prefix}/res',\n 'pkgdatadir': '${datarootdir}/wayland',\n 'bindir': '${prefix}/bin',\n 'wayland_scanner': '${bindir}/wayland-scanner',\n }\n self.cpp_info.components[\"wayland-scanner\"].set_property(\n \"pkg_config_custom_content\",\n \"\\n\".join(f\"{key}={value}\" for key,value in pkgconfig_variables.items()))\n\n bindir = os.path.join(self.package_folder, \"bin\")\n self.buildenv_info.prepend_path(\"PATH\", bindir)\n self.runenv_info.prepend_path(\"PATH\", bindir)\n # TODO: Remove in Conan 2.0 where Environment class will be required.\n self.output.info(\"Appending PATH environment variable: {}\".format(bindir))\n self.env_info.PATH.append(bindir)\n\n if self.options.enable_libraries:\n self.cpp_info.components[\"wayland-server\"].libs = [\"wayland-server\"]\n self.cpp_info.components[\"wayland-server\"].set_property(\"pkg_config_name\", \"wayland-server\")\n self.cpp_info.components[\"wayland-server\"].names[\"pkg_config\"] = \"wayland-server\"\n self.cpp_info.components[\"wayland-server\"].requires = [\"libffi::libffi\"]\n self.cpp_info.components[\"wayland-server\"].system_libs = [\"pthread\", \"m\"]\n self.cpp_info.components[\"wayland-server\"].resdirs = [\"res\"]\n if self.version >= Version(\"1.21.0\") and self.settings.os == \"Linux\":\n self.cpp_info.components[\"wayland-server\"].system_libs += [\"rt\"]\n self.cpp_info.components[\"wayland-server\"].set_property(\"component_version\", self.version)\n\n pkgconfig_variables = {\n 'datarootdir': '${prefix}/res',\n 'pkgdatadir': '${datarootdir}/wayland',\n }\n self.cpp_info.components[\"wayland-server\"].set_property(\n \"pkg_config_custom_content\",\n 
\"\\n\".join(f\"{key}={value}\" for key, value in pkgconfig_variables.items()))\n\n self.cpp_info.components[\"wayland-client\"].libs = [\"wayland-client\"]\n self.cpp_info.components[\"wayland-client\"].set_property(\"pkg_config_name\", \"wayland-client\")\n self.cpp_info.components[\"wayland-client\"].names[\"pkg_config\"] = \"wayland-client\"\n self.cpp_info.components[\"wayland-client\"].requires = [\"libffi::libffi\"]\n self.cpp_info.components[\"wayland-client\"].system_libs = [\"pthread\", \"m\"]\n self.cpp_info.components[\"wayland-client\"].resdirs = [\"res\"]\n if self.version >= Version(\"1.21.0\") and self.settings.os == \"Linux\":\n self.cpp_info.components[\"wayland-client\"].system_libs += [\"rt\"]\n self.cpp_info.components[\"wayland-client\"].set_property(\"component_version\", self.version)\n\n pkgconfig_variables = {\n 'datarootdir': '${prefix}/res',\n 'pkgdatadir': '${datarootdir}/wayland',\n }\n self.cpp_info.components[\"wayland-client\"].set_property(\n \"pkg_config_custom_content\",\n \"\\n\".join(f\"{key}={value}\" for key, value in pkgconfig_variables.items()))\n\n self.cpp_info.components[\"wayland-cursor\"].libs = [\"wayland-cursor\"]\n self.cpp_info.components[\"wayland-cursor\"].set_property(\"pkg_config_name\", \"wayland-cursor\")\n self.cpp_info.components[\"wayland-cursor\"].names[\"pkg_config\"] = \"wayland-cursor\"\n self.cpp_info.components[\"wayland-cursor\"].requires = [\"wayland-client\"]\n self.cpp_info.components[\"wayland-cursor\"].set_property(\"component_version\", self.version)\n\n self.cpp_info.components[\"wayland-egl\"].libs = [\"wayland-egl\"]\n self.cpp_info.components[\"wayland-egl\"].set_property(\"pkg_config_name\", \"wayland-egl\")\n self.cpp_info.components[\"wayland-egl\"].names[\"pkg_config\"] = \"wayland-egl\"\n self.cpp_info.components[\"wayland-egl\"].requires = [\"wayland-client\"]\n self.cpp_info.components[\"wayland-egl\"].set_property(\"component_version\", \"18.1.0\")\n\n self.cpp_info.components[\"wayland-egl-backend\"].names[\"pkg_config\"] = \"wayland-egl-backend\"\n self.cpp_info.components[\"wayland-egl-backend\"].set_property(\"pkg_config_name\", \"wayland-egl-backend\")\n self.cpp_info.components[\"wayland-egl-backend\"].set_property(\"component_version\", \"3\")\n\n bindir = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\".format(bindir))\n self.env_info.PATH.append(bindir)\n", "path": "recipes/wayland/all/conanfile.py"}]}
| 3,105 | 573 |
gh_patches_debug_4081
|
rasdani/github-patches
|
git_diff
|
dbt-labs__dbt-core-8054
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pin click
resolves #8048
### Description
Pin main to `click>=8.1.1,<8.1.4`
### Checklist
- [ ] I have read [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md) and understand what's expected of me
- [ ] I have signed the [CLA](https://docs.getdbt.com/docs/contributor-license-agreements)
- [ ] I have run this code in development and it appears to resolve the stated issue
- [ ] This PR includes tests, or tests are not required/relevant for this PR
- [ ] I have [opened an issue to add/update docs](https://github.com/dbt-labs/docs.getdbt.com/issues/new/choose), or docs changes are not required/relevant for this PR
- [ ] I have run `changie new` to [create a changelog entry](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-a-changelog-entry)
--- END ISSUE ---
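As a quick aside on what the proposed pin admits: the sketch below is not part of the original issue — it assumes the third-party `packaging` library and an arbitrary list of click releases — and simply evaluates a few versions against the `>=8.1.1,<8.1.4` specifier.

```python
# Illustrative only: which click releases the proposed specifier would allow.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

pin = SpecifierSet(">=8.1.1,<8.1.4")

for release in ["8.1.0", "8.1.1", "8.1.3", "8.1.4"]:
    status = "allowed" if Version(release) in pin else "excluded"
    print(f"click {release}: {status}")
```

Only 8.1.1 through 8.1.3 satisfy the range, which is the point of the upper bound.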
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/setup.py`
Content:
```
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 if sys.version_info < (3, 7, 2):
6 print("Error: dbt does not support this version of Python.")
7 print("Please upgrade to Python 3.7.2 or higher.")
8 sys.exit(1)
9
10
11 from setuptools import setup
12
13 try:
14 from setuptools import find_namespace_packages
15 except ImportError:
16 # the user has a downlevel version of setuptools.
17 print("Error: dbt requires setuptools v40.1.0 or higher.")
18 print('Please upgrade setuptools with "pip install --upgrade setuptools" ' "and try again")
19 sys.exit(1)
20
21
22 this_directory = os.path.abspath(os.path.dirname(__file__))
23 with open(os.path.join(this_directory, "README.md")) as f:
24 long_description = f.read()
25
26
27 package_name = "dbt-core"
28 package_version = "1.3.4"
29 description = """With dbt, data analysts and engineers can build analytics \
30 the way engineers build applications."""
31
32
33 setup(
34 name=package_name,
35 version=package_version,
36 description=description,
37 long_description=long_description,
38 long_description_content_type="text/markdown",
39 author="dbt Labs",
40 author_email="[email protected]",
41 url="https://github.com/dbt-labs/dbt-core",
42 packages=find_namespace_packages(include=["dbt", "dbt.*"]),
43 include_package_data=True,
44 test_suite="test",
45 entry_points={
46 "console_scripts": ["dbt = dbt.main:main"],
47 },
48 install_requires=[
49 "Jinja2==3.1.2",
50 "agate>=1.6,<1.6.4",
51 "click>=7.0,<9",
52 "colorama>=0.3.9,<0.4.6",
53 "hologram>=0.0.14,<=0.0.15",
54 "isodate>=0.6,<0.7",
55 "logbook>=1.5,<1.6",
56 "mashumaro[msgpack]==3.0.4",
57 "minimal-snowplow-tracker==0.0.2",
58 "networkx>=2.3,<2.8.1;python_version<'3.8'",
59 "networkx>=2.3,<3;python_version>='3.8'",
60 "packaging>=20.9,<22.0",
61 "sqlparse>=0.2.3,<0.4.4",
62 "dbt-extractor~=0.4.1",
63 "typing-extensions>=3.7.4",
64 "werkzeug>=1,<3",
65 "pathspec~=0.9.0",
66 "pytz>=2015.7",
67 # the following are all to match snowflake-connector-python
68 "requests<3.0.0",
69 "idna>=2.5,<4",
70 "cffi>=1.9,<2.0.0",
71 "pyyaml>=6.0",
72 ],
73 zip_safe=False,
74 classifiers=[
75 "Development Status :: 5 - Production/Stable",
76 "License :: OSI Approved :: Apache Software License",
77 "Operating System :: Microsoft :: Windows",
78 "Operating System :: MacOS :: MacOS X",
79 "Operating System :: POSIX :: Linux",
80 "Programming Language :: Python :: 3.7",
81 "Programming Language :: Python :: 3.8",
82 "Programming Language :: Python :: 3.9",
83 "Programming Language :: Python :: 3.10",
84 ],
85 python_requires=">=3.7.2",
86 )
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/core/setup.py b/core/setup.py
--- a/core/setup.py
+++ b/core/setup.py
@@ -48,7 +48,8 @@
install_requires=[
"Jinja2==3.1.2",
"agate>=1.6,<1.6.4",
- "click>=7.0,<9",
+ # temporarily pinning click for mypy failures: https://github.com/pallets/click/issues/2558
+ "click>=7.0,<8.1.4",
"colorama>=0.3.9,<0.4.6",
"hologram>=0.0.14,<=0.0.15",
"isodate>=0.6,<0.7",
|
{"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -48,7 +48,8 @@\n install_requires=[\n \"Jinja2==3.1.2\",\n \"agate>=1.6,<1.6.4\",\n- \"click>=7.0,<9\",\n+ # temporarily pinning click for mypy failures: https://github.com/pallets/click/issues/2558\n+ \"click>=7.0,<8.1.4\",\n \"colorama>=0.3.9,<0.4.6\",\n \"hologram>=0.0.14,<=0.0.15\",\n \"isodate>=0.6,<0.7\",\n", "issue": "pin click\nresolves #8048 \r\n\r\n### Description\r\n\r\nPin main to `click>=8.1.1,<8.1.4`\r\n\r\n### Checklist\r\n\r\n- [ ] I have read [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md) and understand what's expected of me\r\n- [ ] I have signed the [CLA](https://docs.getdbt.com/docs/contributor-license-agreements)\r\n- [ ] I have run this code in development and it appears to resolve the stated issue\r\n- [ ] This PR includes tests, or tests are not required/relevant for this PR\r\n- [ ] I have [opened an issue to add/update docs](https://github.com/dbt-labs/docs.getdbt.com/issues/new/choose), or docs changes are not required/relevant for this PR\r\n- [ ] I have run `changie new` to [create a changelog entry](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-a-changelog-entry)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 7, 2):\n print(\"Error: dbt does not support this version of Python.\")\n print(\"Please upgrade to Python 3.7.2 or higher.\")\n sys.exit(1)\n\n\nfrom setuptools import setup\n\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print(\"Error: dbt requires setuptools v40.1.0 or higher.\")\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" ' \"and try again\")\n sys.exit(1)\n\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\")) as f:\n long_description = f.read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"1.3.4\"\ndescription = \"\"\"With dbt, data analysts and engineers can build analytics \\\nthe way engineers build applications.\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=[\"dbt\", \"dbt.*\"]),\n include_package_data=True,\n test_suite=\"test\",\n entry_points={\n \"console_scripts\": [\"dbt = dbt.main:main\"],\n },\n install_requires=[\n \"Jinja2==3.1.2\",\n \"agate>=1.6,<1.6.4\",\n \"click>=7.0,<9\",\n \"colorama>=0.3.9,<0.4.6\",\n \"hologram>=0.0.14,<=0.0.15\",\n \"isodate>=0.6,<0.7\",\n \"logbook>=1.5,<1.6\",\n \"mashumaro[msgpack]==3.0.4\",\n \"minimal-snowplow-tracker==0.0.2\",\n \"networkx>=2.3,<2.8.1;python_version<'3.8'\",\n \"networkx>=2.3,<3;python_version>='3.8'\",\n \"packaging>=20.9,<22.0\",\n \"sqlparse>=0.2.3,<0.4.4\",\n \"dbt-extractor~=0.4.1\",\n \"typing-extensions>=3.7.4\",\n \"werkzeug>=1,<3\",\n \"pathspec~=0.9.0\",\n \"pytz>=2015.7\",\n # the following are all to match snowflake-connector-python\n \"requests<3.0.0\",\n \"idna>=2.5,<4\",\n \"cffi>=1.9,<2.0.0\",\n \"pyyaml>=6.0\",\n ],\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating 
System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n python_requires=\">=3.7.2\",\n)\n", "path": "core/setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 7, 2):\n print(\"Error: dbt does not support this version of Python.\")\n print(\"Please upgrade to Python 3.7.2 or higher.\")\n sys.exit(1)\n\n\nfrom setuptools import setup\n\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print(\"Error: dbt requires setuptools v40.1.0 or higher.\")\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" ' \"and try again\")\n sys.exit(1)\n\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\")) as f:\n long_description = f.read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"1.3.4\"\ndescription = \"\"\"With dbt, data analysts and engineers can build analytics \\\nthe way engineers build applications.\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=[\"dbt\", \"dbt.*\"]),\n include_package_data=True,\n test_suite=\"test\",\n entry_points={\n \"console_scripts\": [\"dbt = dbt.main:main\"],\n },\n install_requires=[\n \"Jinja2==3.1.2\",\n \"agate>=1.6,<1.6.4\",\n # temporarily pinning click for mypy failures: https://github.com/pallets/click/issues/2558\n \"click>=7.0,<8.1.4\",\n \"colorama>=0.3.9,<0.4.6\",\n \"hologram>=0.0.14,<=0.0.15\",\n \"isodate>=0.6,<0.7\",\n \"logbook>=1.5,<1.6\",\n \"mashumaro[msgpack]==3.0.4\",\n \"minimal-snowplow-tracker==0.0.2\",\n \"networkx>=2.3,<2.8.1;python_version<'3.8'\",\n \"networkx>=2.3,<3;python_version>='3.8'\",\n \"packaging>=20.9,<22.0\",\n \"sqlparse>=0.2.3,<0.4.4\",\n \"dbt-extractor~=0.4.1\",\n \"typing-extensions>=3.7.4\",\n \"werkzeug>=1,<3\",\n \"pathspec~=0.9.0\",\n \"pytz>=2015.7\",\n # the following are all to match snowflake-connector-python\n \"requests<3.0.0\",\n \"idna>=2.5,<4\",\n \"cffi>=1.9,<2.0.0\",\n \"pyyaml>=6.0\",\n ],\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n python_requires=\">=3.7.2\",\n)\n", "path": "core/setup.py"}]}
| 1,438 | 171 |
gh_patches_debug_42184
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-1016
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IHG hotels scraper missing IHG Army Hotels
Missing this additional source of hotel listings:
https://www.ihg.com/armyhotels/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/ihg_hotels.py`
Content:
```
1 import json
2 import re
3 import scrapy
4
5 from locations.items import GeojsonPointItem
6
7
8 class IHGHotels(scrapy.Spider):
9
10 name = "ihg_hotels"
11 # allowed_domains = ["ihg.com"] # the Kimpton hotels each have their own domains
12 download_delay = 0.5
13
14 start_urls = (
15 'https://www.ihg.com/holidayinn/destinations/us/en/explore',
16 )
17
18 def parse_hotel(self, response):
19 if 'hoteldetail' not in response.url:
20 # got redirected back to search page
21 return
22
23 street_address = " ".join(response.xpath('//span[@itemprop="streetAddress"]/p/text()').extract())
24 if not street_address:
25 street_address = response.xpath('//span[@itemprop="streetAddress"]/text()').extract_first()
26
27 city = response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first()
28 state = response.xpath('//span[@itemprop="addressRegion"]/text()').extract_first()
29
30 properties = {
31 'ref': "_".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),
32 'name': response.xpath('//meta[@property="og:title"]/@content').extract_first(),
33 'addr_full': street_address.replace(u'\u00a0', ' ').strip(', ') if street_address else None,
34 'city': city.replace(u'\u00a0', ' ').strip(', ') if city else None,
35 'state': state.replace(u'\u00a0', ' ') if state else None,
36 'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
37 'country': response.xpath('//span[@itemprop="addressCountry"]/text()').extract_first(),
38 'phone': (response.xpath('//span[@itemprop="telephone"]/text()').extract_first() or '').strip('| '),
39 'lat': float(response.xpath('//meta[@property="place:location:latitude"]/@content').extract_first()),
40 'lon': float(response.xpath('//meta[@property="place:location:longitude"]/@content').extract_first()),
41 'website': response.url,
42 }
43
44 yield GeojsonPointItem(**properties)
45
46 def parse_kimpton(self, response):
47 url = response.xpath('//a[contains(text(), "VISIT HOTEL WEBSITE")]/@href').extract_first()
48 properties = {
49 'ref': "_".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),
50 'lat': float(response.xpath('//meta[@property="place:location:latitude"]/@content').extract_first()),
51 'lon': float(response.xpath('//meta[@property="place:location:longitude"]/@content').extract_first()),
52 }
53 if not url: # "opening soon" hotels just have teaser pages
54 return
55 url = url.split('?')[0] # remove querystring
56 yield scrapy.Request(url, callback=self.parse_kimpton_data, meta={"properties": properties})
57
58 def parse_kimpton_data(self, response):
59 properties = response.meta["properties"]
60 script = response.xpath('//script[@type="application/ld+json"]/text()').extract_first()
61 if script:
62 data = json.loads(script)
63 else:
64 data = {}
65 if 'name' in data:
66 properties.update({
67 'name': data["name"],
68 'addr_full': data["address"]["streetAddress"],
69 'city': data["address"]["addressLocality"],
70 'state': data["address"].get("addressRegion"),
71 'postcode': data["address"]["postalCode"],
72 'country': data["address"].get("addressCountry"),
73 'phone': data.get("telephone"),
74 'website': data["url"]
75 })
76
77 else:
78 street_address = " ".join(response.xpath('//span[@itemprop="streetAddress"]/p/text()').extract())
79 if not street_address:
80 street_address = response.xpath('//span[@itemprop="streetAddress"]/text()').extract_first()
81
82 city = response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first()
83 state = response.xpath('//span[@itemprop="addressRegion"]/text()').extract_first()
84
85 properties.update({
86 'name': response.xpath('//meta[@property="og:title"]/@content').extract_first(),
87 'addr_full': street_address.replace(u'\u00a0', ' ').strip(', ') if street_address else None,
88 'city': city.replace(u'\u00a0', ' ').strip(', ') if city else None,
89 'state': state.replace(u'\u00a0', ' ') if state else None,
90 'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
91 'country': response.xpath('//span[@itemprop="addressCountry"]/text()').extract_first(),
92 'phone': (response.xpath('//span[@itemprop="telephone"]/text()').extract_first() or '').strip('| '),
93 'website': response.url,
94 })
95
96 yield GeojsonPointItem(**properties)
97
98 def parse_regent(self, response):
99 data = json.loads(response.xpath('//script[@type="application/ld+json"]/text()').extract_first())
100
101 properties = {
102 'ref': "_".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),
103 'name': data["name"],
104 'addr_full': data["address"]["streetAddress"],
105 'city': data["address"]["addressLocality"],
106 'state': data["address"].get("addressRegion"),
107 'postcode': data["address"]["postalCode"],
108 'country': data["address"]["addressCountry"],
109 'phone': data["telephone"],
110 'lat': float(data["geo"]["latitude"]),
111 'lon': float(data["geo"]["longitude"]),
112 'website': response.url,
113 }
114
115 yield GeojsonPointItem(**properties)
116
117 def parse_crowne_plaza(self, response):
118 address = response.xpath('//a[@class="hotel-home"]/text()').extract_first().strip()
119
120 address_parts = address.split('|')
121
122 if len(address_parts) == 4: # international addresses
123 addr_city, postcode, country, _ = address_parts
124 state = ''
125 else: # us addresses
126 addr_city, state, postcode, country, _ = address_parts
127
128 street_address = ",".join(addr_city.split(',')[0:-1])
129 city = addr_city.split(',')[-1]
130
131 properties = {
132 'ref': "_".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),
133 'name': response.xpath('//meta[@property="og:title"]/@content').extract_first(),
134 'addr_full': street_address.strip(),
135 'city': city.strip(),
136 'state': state.strip(),
137 'postcode': postcode.strip(),
138 'country': country.strip(),
139 'phone': response.xpath('//div[@class="new-hinfo-address"]/p/a[2]/text()').extract_first(),
140 'lat': float(response.xpath('//meta[@property="place:location:latitude"]/@content').extract_first()),
141 'lon': float(response.xpath('//meta[@property="place:location:longitude"]/@content').extract_first()),
142 'website': response.url,
143 }
144
145 yield GeojsonPointItem(**properties)
146
147 def parse_candlewood_staybridge(self, response):
148 if 'hoteldetail' not in response.url:
149 # got redirected back to search page
150 return
151
152 street_address = " ".join(response.xpath('//span[@itemprop="streetAddress"]/p/text()').extract())
153 if not street_address:
154 street_address = response.xpath('//span[@itemprop="streetAddress"]/text()').extract_first()
155
156 region = response.xpath('//span[@itemprop="addressRegion"]/text()').extract_first().replace(u'\u00a0',' ')
157
158 match = re.search(r'([a-z]+)\s(\d+)\s(.*)', region, re.IGNORECASE)
159 if match:
160 state, postcode, country = match.groups()
161 else:
162 state, postcode, country = None, None, region.strip()
163
164 properties = {
165 'ref': "_".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),
166 'name': response.xpath('//meta[@property="og:title"]/@content').extract_first(),
167 'addr_full': street_address.replace(u'\u00a0', ' ').strip(', '),
168 'city': response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first().replace(u'\u00a0', ' ').strip(', '),
169 'state': state,
170 'postcode': postcode,
171 'country': country,
172 'phone': response.xpath('//div[@class="booking"]/a/text()').extract_first(),
173 'lat': float(response.xpath('//meta[@property="place:location:latitude"]/@content').extract_first()),
174 'lon': float(response.xpath('//meta[@property="place:location:longitude"]/@content').extract_first()),
175 'website': response.url,
176 }
177
178 yield GeojsonPointItem(**properties)
179
180 def parse(self, response):
181
182 hotel_parsers = {
183 'holidayinn': self.parse_hotel,
184 'crowneplaza': self.parse_crowne_plaza,
185 'holidayinnexpress': self.parse_hotel,
186 'hotelindigo': self.parse_hotel,
187 'candlewood': self.parse_candlewood_staybridge,
188 'staybridge': self.parse_candlewood_staybridge,
189 'holidayinnresorts': self.parse_hotel,
190 'intercontinental': self.parse_hotel,
191 'regent': self.parse_regent,
192 'hotels': self.parse_hotel, # vocos
193 'kimptonhotels': self.parse_kimpton,
194 'holidayinnclubvacations': self.parse_hotel,
195 'evenhotels': self.parse_hotel,
196 'avidhotels': self.parse_hotel,
197 'hualuxe': self.parse_hotel
198 }
199
200 hotel_urls = response.xpath('//div[@class="hotelList"]//div[contains(@class, "hotelItem")]//a[contains(@class, "hotel-name")]/@href').extract()
201 if hotel_urls:
202 for url in hotel_urls:
203 hotel_type = re.search(r'ihg.com/(.*?)/', response.urljoin(url), re.IGNORECASE).group(1)
204
205 yield scrapy.Request(response.urljoin(url),
206 callback=hotel_parsers[hotel_type])
207
208 else:
209 urls = response.xpath('//li[@class="listingItem"]/a/@href').extract()
210 for url in urls:
211 yield scrapy.Request(response.urljoin(url))
212
```
--- END FILES ---
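For context on the dispatch logic in `parse()` above: the brand segment of each hotel URL selects the callback from `hotel_parsers`, which is why listings under a brand missing from that dict (such as Army Hotels) cannot be handled. A small illustration of that extraction follows; the URLs here are invented for the example.

```python
# Sketch of the URL-to-brand extraction used by parse() above (example URLs are made up).
import re

urls = [
    "https://www.ihg.com/holidayinn/hotels/us/en/anytown/abcde/hoteldetail",
    "https://www.ihg.com/armyhotels/hotels/us/en/fort-example/hoteldetail",
]

for url in urls:
    brand = re.search(r"ihg.com/(.*?)/", url, re.IGNORECASE).group(1)
    print(url, "->", brand)  # -> holidayinn, armyhotels
```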
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/ihg_hotels.py b/locations/spiders/ihg_hotels.py
--- a/locations/spiders/ihg_hotels.py
+++ b/locations/spiders/ihg_hotels.py
@@ -13,6 +13,7 @@
start_urls = (
'https://www.ihg.com/holidayinn/destinations/us/en/explore',
+ 'https://www.ihg.com/armyhotels/hotels/us/en/installations',
)
def parse_hotel(self, response):
@@ -32,7 +33,7 @@
'name': response.xpath('//meta[@property="og:title"]/@content').extract_first(),
'addr_full': street_address.replace(u'\u00a0', ' ').strip(', ') if street_address else None,
'city': city.replace(u'\u00a0', ' ').strip(', ') if city else None,
- 'state': state.replace(u'\u00a0', ' ') if state else None,
+ 'state': state.replace(u'\u00a0', ' ').strip(', ') if state else None,
'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
'country': response.xpath('//span[@itemprop="addressCountry"]/text()').extract_first(),
'phone': (response.xpath('//span[@itemprop="telephone"]/text()').extract_first() or '').strip('| '),
@@ -177,6 +178,23 @@
yield GeojsonPointItem(**properties)
+ def parse_army_hotel(self, response):
+ properties = {
+ 'ref': "_".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),
+ 'name': response.xpath('//meta[@property="og:title"]/@content').extract_first(),
+ 'addr_full': response.xpath('//meta[@property="business:contact_data:street_address"]/@content').extract_first(),
+ 'city': response.xpath('//meta[@property="business:contact_data:locality"]/@content').extract_first(),
+ 'state': response.xpath('//meta[@property="business:contact_data:region"]/@content').extract_first(),
+ 'postcode': response.xpath('//meta[@property="business:contact_data:postal_code"]/@content').extract_first(),
+ 'country': response.xpath('//meta[@property="business:contact_data:country_name"]/@content').extract_first(),
+ 'phone': (response.xpath('//span[@title="Hotel Front Desk:"]/span/text()').extract_first() or "").strip(),
+ 'lat': float(response.xpath('//meta[@property="place:location:latitude"]/@content').extract_first()),
+ 'lon': float(response.xpath('//meta[@property="place:location:longitude"]/@content').extract_first()),
+ 'website': response.url,
+ }
+
+ yield GeojsonPointItem(**properties)
+
def parse(self, response):
hotel_parsers = {
@@ -194,10 +212,15 @@
'holidayinnclubvacations': self.parse_hotel,
'evenhotels': self.parse_hotel,
'avidhotels': self.parse_hotel,
- 'hualuxe': self.parse_hotel
+ 'hualuxe': self.parse_hotel,
+ 'armyhotels': self.parse_army_hotel
}
hotel_urls = response.xpath('//div[@class="hotelList"]//div[contains(@class, "hotelItem")]//a[contains(@class, "hotel-name")]/@href').extract()
+
+ if 'armyhotels' in response.url:
+ hotel_urls = response.xpath('//div[@id="hotelListWrap"]//a/@href').extract()
+
if hotel_urls:
for url in hotel_urls:
hotel_type = re.search(r'ihg.com/(.*?)/', response.urljoin(url), re.IGNORECASE).group(1)
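
The new `parse_army_hotel` callback above relies on Open Graph / `business:contact_data` meta tags rather than the `itemprop` spans used by the other parsers. A minimal sketch of that lookup on a toy snippet is shown below; only `scrapy.Selector` is assumed, and the HTML and values are invented for illustration.

```python
# Toy demonstration of the meta-tag XPath lookups used in parse_army_hotel.
from scrapy import Selector

html = """
<html><head>
  <meta property="og:title" content="Example Army Hotel"/>
  <meta property="business:contact_data:locality" content="Example City"/>
</head></html>
"""

sel = Selector(text=html)
print(sel.xpath('//meta[@property="og:title"]/@content').extract_first())
print(sel.xpath('//meta[@property="business:contact_data:locality"]/@content').extract_first())
```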
|
{"golden_diff": "diff --git a/locations/spiders/ihg_hotels.py b/locations/spiders/ihg_hotels.py\n--- a/locations/spiders/ihg_hotels.py\n+++ b/locations/spiders/ihg_hotels.py\n@@ -13,6 +13,7 @@\n \n start_urls = (\n 'https://www.ihg.com/holidayinn/destinations/us/en/explore',\n+ 'https://www.ihg.com/armyhotels/hotels/us/en/installations',\n )\n \n def parse_hotel(self, response):\n@@ -32,7 +33,7 @@\n 'name': response.xpath('//meta[@property=\"og:title\"]/@content').extract_first(),\n 'addr_full': street_address.replace(u'\\u00a0', ' ').strip(', ') if street_address else None,\n 'city': city.replace(u'\\u00a0', ' ').strip(', ') if city else None,\n- 'state': state.replace(u'\\u00a0', ' ') if state else None,\n+ 'state': state.replace(u'\\u00a0', ' ').strip(', ') if state else None,\n 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n 'country': response.xpath('//span[@itemprop=\"addressCountry\"]/text()').extract_first(),\n 'phone': (response.xpath('//span[@itemprop=\"telephone\"]/text()').extract_first() or '').strip('| '),\n@@ -177,6 +178,23 @@\n \n yield GeojsonPointItem(**properties)\n \n+ def parse_army_hotel(self, response):\n+ properties = {\n+ 'ref': \"_\".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),\n+ 'name': response.xpath('//meta[@property=\"og:title\"]/@content').extract_first(),\n+ 'addr_full': response.xpath('//meta[@property=\"business:contact_data:street_address\"]/@content').extract_first(),\n+ 'city': response.xpath('//meta[@property=\"business:contact_data:locality\"]/@content').extract_first(),\n+ 'state': response.xpath('//meta[@property=\"business:contact_data:region\"]/@content').extract_first(),\n+ 'postcode': response.xpath('//meta[@property=\"business:contact_data:postal_code\"]/@content').extract_first(),\n+ 'country': response.xpath('//meta[@property=\"business:contact_data:country_name\"]/@content').extract_first(),\n+ 'phone': (response.xpath('//span[@title=\"Hotel Front Desk:\"]/span/text()').extract_first() or \"\").strip(),\n+ 'lat': float(response.xpath('//meta[@property=\"place:location:latitude\"]/@content').extract_first()),\n+ 'lon': float(response.xpath('//meta[@property=\"place:location:longitude\"]/@content').extract_first()),\n+ 'website': response.url,\n+ }\n+\n+ yield GeojsonPointItem(**properties)\n+\n def parse(self, response):\n \n hotel_parsers = {\n@@ -194,10 +212,15 @@\n 'holidayinnclubvacations': self.parse_hotel,\n 'evenhotels': self.parse_hotel,\n 'avidhotels': self.parse_hotel,\n- 'hualuxe': self.parse_hotel\n+ 'hualuxe': self.parse_hotel,\n+ 'armyhotels': self.parse_army_hotel\n }\n \n hotel_urls = response.xpath('//div[@class=\"hotelList\"]//div[contains(@class, \"hotelItem\")]//a[contains(@class, \"hotel-name\")]/@href').extract()\n+\n+ if 'armyhotels' in response.url:\n+ hotel_urls = response.xpath('//div[@id=\"hotelListWrap\"]//a/@href').extract()\n+\n if hotel_urls:\n for url in hotel_urls:\n hotel_type = re.search(r'ihg.com/(.*?)/', response.urljoin(url), re.IGNORECASE).group(1)\n", "issue": "IHG hotels scraper missing IHG Army Hotels\nMissing this additional source of hotel listings:\r\nhttps://www.ihg.com/armyhotels/\n", "before_files": [{"content": "import json\nimport re\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass IHGHotels(scrapy.Spider):\n\n name = \"ihg_hotels\"\n # allowed_domains = [\"ihg.com\"] # the Kimpton hotels each have their own domains\n download_delay = 0.5\n\n start_urls = (\n 
'https://www.ihg.com/holidayinn/destinations/us/en/explore',\n )\n\n def parse_hotel(self, response):\n if 'hoteldetail' not in response.url:\n # got redirected back to search page\n return\n\n street_address = \" \".join(response.xpath('//span[@itemprop=\"streetAddress\"]/p/text()').extract())\n if not street_address:\n street_address = response.xpath('//span[@itemprop=\"streetAddress\"]/text()').extract_first()\n\n city = response.xpath('//span[@itemprop=\"addressLocality\"]/text()').extract_first()\n state = response.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first()\n\n properties = {\n 'ref': \"_\".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),\n 'name': response.xpath('//meta[@property=\"og:title\"]/@content').extract_first(),\n 'addr_full': street_address.replace(u'\\u00a0', ' ').strip(', ') if street_address else None,\n 'city': city.replace(u'\\u00a0', ' ').strip(', ') if city else None,\n 'state': state.replace(u'\\u00a0', ' ') if state else None,\n 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n 'country': response.xpath('//span[@itemprop=\"addressCountry\"]/text()').extract_first(),\n 'phone': (response.xpath('//span[@itemprop=\"telephone\"]/text()').extract_first() or '').strip('| '),\n 'lat': float(response.xpath('//meta[@property=\"place:location:latitude\"]/@content').extract_first()),\n 'lon': float(response.xpath('//meta[@property=\"place:location:longitude\"]/@content').extract_first()),\n 'website': response.url,\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse_kimpton(self, response):\n url = response.xpath('//a[contains(text(), \"VISIT HOTEL WEBSITE\")]/@href').extract_first()\n properties = {\n 'ref': \"_\".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),\n 'lat': float(response.xpath('//meta[@property=\"place:location:latitude\"]/@content').extract_first()),\n 'lon': float(response.xpath('//meta[@property=\"place:location:longitude\"]/@content').extract_first()),\n }\n if not url: # \"opening soon\" hotels just have teaser pages\n return\n url = url.split('?')[0] # remove querystring\n yield scrapy.Request(url, callback=self.parse_kimpton_data, meta={\"properties\": properties})\n\n def parse_kimpton_data(self, response):\n properties = response.meta[\"properties\"]\n script = response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first()\n if script:\n data = json.loads(script)\n else:\n data = {}\n if 'name' in data:\n properties.update({\n 'name': data[\"name\"],\n 'addr_full': data[\"address\"][\"streetAddress\"],\n 'city': data[\"address\"][\"addressLocality\"],\n 'state': data[\"address\"].get(\"addressRegion\"),\n 'postcode': data[\"address\"][\"postalCode\"],\n 'country': data[\"address\"].get(\"addressCountry\"),\n 'phone': data.get(\"telephone\"),\n 'website': data[\"url\"]\n })\n\n else:\n street_address = \" \".join(response.xpath('//span[@itemprop=\"streetAddress\"]/p/text()').extract())\n if not street_address:\n street_address = response.xpath('//span[@itemprop=\"streetAddress\"]/text()').extract_first()\n\n city = response.xpath('//span[@itemprop=\"addressLocality\"]/text()').extract_first()\n state = response.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first()\n\n properties.update({\n 'name': response.xpath('//meta[@property=\"og:title\"]/@content').extract_first(),\n 'addr_full': street_address.replace(u'\\u00a0', ' ').strip(', ') if street_address else None,\n 'city': city.replace(u'\\u00a0', ' 
').strip(', ') if city else None,\n 'state': state.replace(u'\\u00a0', ' ') if state else None,\n 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n 'country': response.xpath('//span[@itemprop=\"addressCountry\"]/text()').extract_first(),\n 'phone': (response.xpath('//span[@itemprop=\"telephone\"]/text()').extract_first() or '').strip('| '),\n 'website': response.url,\n })\n\n yield GeojsonPointItem(**properties)\n\n def parse_regent(self, response):\n data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first())\n\n properties = {\n 'ref': \"_\".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),\n 'name': data[\"name\"],\n 'addr_full': data[\"address\"][\"streetAddress\"],\n 'city': data[\"address\"][\"addressLocality\"],\n 'state': data[\"address\"].get(\"addressRegion\"),\n 'postcode': data[\"address\"][\"postalCode\"],\n 'country': data[\"address\"][\"addressCountry\"],\n 'phone': data[\"telephone\"],\n 'lat': float(data[\"geo\"][\"latitude\"]),\n 'lon': float(data[\"geo\"][\"longitude\"]),\n 'website': response.url,\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse_crowne_plaza(self, response):\n address = response.xpath('//a[@class=\"hotel-home\"]/text()').extract_first().strip()\n\n address_parts = address.split('|')\n\n if len(address_parts) == 4: # international addresses\n addr_city, postcode, country, _ = address_parts\n state = ''\n else: # us addresses\n addr_city, state, postcode, country, _ = address_parts\n\n street_address = \",\".join(addr_city.split(',')[0:-1])\n city = addr_city.split(',')[-1]\n\n properties = {\n 'ref': \"_\".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),\n 'name': response.xpath('//meta[@property=\"og:title\"]/@content').extract_first(),\n 'addr_full': street_address.strip(),\n 'city': city.strip(),\n 'state': state.strip(),\n 'postcode': postcode.strip(),\n 'country': country.strip(),\n 'phone': response.xpath('//div[@class=\"new-hinfo-address\"]/p/a[2]/text()').extract_first(),\n 'lat': float(response.xpath('//meta[@property=\"place:location:latitude\"]/@content').extract_first()),\n 'lon': float(response.xpath('//meta[@property=\"place:location:longitude\"]/@content').extract_first()),\n 'website': response.url,\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse_candlewood_staybridge(self, response):\n if 'hoteldetail' not in response.url:\n # got redirected back to search page\n return\n\n street_address = \" \".join(response.xpath('//span[@itemprop=\"streetAddress\"]/p/text()').extract())\n if not street_address:\n street_address = response.xpath('//span[@itemprop=\"streetAddress\"]/text()').extract_first()\n\n region = response.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first().replace(u'\\u00a0',' ')\n\n match = re.search(r'([a-z]+)\\s(\\d+)\\s(.*)', region, re.IGNORECASE)\n if match:\n state, postcode, country = match.groups()\n else:\n state, postcode, country = None, None, region.strip()\n\n properties = {\n 'ref': \"_\".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),\n 'name': response.xpath('//meta[@property=\"og:title\"]/@content').extract_first(),\n 'addr_full': street_address.replace(u'\\u00a0', ' ').strip(', '),\n 'city': response.xpath('//span[@itemprop=\"addressLocality\"]/text()').extract_first().replace(u'\\u00a0', ' ').strip(', '),\n 'state': state,\n 'postcode': postcode,\n 'country': country,\n 'phone': 
response.xpath('//div[@class=\"booking\"]/a/text()').extract_first(),\n 'lat': float(response.xpath('//meta[@property=\"place:location:latitude\"]/@content').extract_first()),\n 'lon': float(response.xpath('//meta[@property=\"place:location:longitude\"]/@content').extract_first()),\n 'website': response.url,\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n\n hotel_parsers = {\n 'holidayinn': self.parse_hotel,\n 'crowneplaza': self.parse_crowne_plaza,\n 'holidayinnexpress': self.parse_hotel,\n 'hotelindigo': self.parse_hotel,\n 'candlewood': self.parse_candlewood_staybridge,\n 'staybridge': self.parse_candlewood_staybridge,\n 'holidayinnresorts': self.parse_hotel,\n 'intercontinental': self.parse_hotel,\n 'regent': self.parse_regent,\n 'hotels': self.parse_hotel, # vocos\n 'kimptonhotels': self.parse_kimpton,\n 'holidayinnclubvacations': self.parse_hotel,\n 'evenhotels': self.parse_hotel,\n 'avidhotels': self.parse_hotel,\n 'hualuxe': self.parse_hotel\n }\n\n hotel_urls = response.xpath('//div[@class=\"hotelList\"]//div[contains(@class, \"hotelItem\")]//a[contains(@class, \"hotel-name\")]/@href').extract()\n if hotel_urls:\n for url in hotel_urls:\n hotel_type = re.search(r'ihg.com/(.*?)/', response.urljoin(url), re.IGNORECASE).group(1)\n\n yield scrapy.Request(response.urljoin(url),\n callback=hotel_parsers[hotel_type])\n\n else:\n urls = response.xpath('//li[@class=\"listingItem\"]/a/@href').extract()\n for url in urls:\n yield scrapy.Request(response.urljoin(url))\n", "path": "locations/spiders/ihg_hotels.py"}], "after_files": [{"content": "import json\nimport re\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass IHGHotels(scrapy.Spider):\n\n name = \"ihg_hotels\"\n # allowed_domains = [\"ihg.com\"] # the Kimpton hotels each have their own domains\n download_delay = 0.5\n\n start_urls = (\n 'https://www.ihg.com/holidayinn/destinations/us/en/explore',\n 'https://www.ihg.com/armyhotels/hotels/us/en/installations',\n )\n\n def parse_hotel(self, response):\n if 'hoteldetail' not in response.url:\n # got redirected back to search page\n return\n\n street_address = \" \".join(response.xpath('//span[@itemprop=\"streetAddress\"]/p/text()').extract())\n if not street_address:\n street_address = response.xpath('//span[@itemprop=\"streetAddress\"]/text()').extract_first()\n\n city = response.xpath('//span[@itemprop=\"addressLocality\"]/text()').extract_first()\n state = response.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first()\n\n properties = {\n 'ref': \"_\".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),\n 'name': response.xpath('//meta[@property=\"og:title\"]/@content').extract_first(),\n 'addr_full': street_address.replace(u'\\u00a0', ' ').strip(', ') if street_address else None,\n 'city': city.replace(u'\\u00a0', ' ').strip(', ') if city else None,\n 'state': state.replace(u'\\u00a0', ' ').strip(', ') if state else None,\n 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n 'country': response.xpath('//span[@itemprop=\"addressCountry\"]/text()').extract_first(),\n 'phone': (response.xpath('//span[@itemprop=\"telephone\"]/text()').extract_first() or '').strip('| '),\n 'lat': float(response.xpath('//meta[@property=\"place:location:latitude\"]/@content').extract_first()),\n 'lon': float(response.xpath('//meta[@property=\"place:location:longitude\"]/@content').extract_first()),\n 'website': response.url,\n }\n\n yield GeojsonPointItem(**properties)\n\n def 
parse_kimpton(self, response):\n url = response.xpath('//a[contains(text(), \"VISIT HOTEL WEBSITE\")]/@href').extract_first()\n properties = {\n 'ref': \"_\".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),\n 'lat': float(response.xpath('//meta[@property=\"place:location:latitude\"]/@content').extract_first()),\n 'lon': float(response.xpath('//meta[@property=\"place:location:longitude\"]/@content').extract_first()),\n }\n if not url: # \"opening soon\" hotels just have teaser pages\n return\n url = url.split('?')[0] # remove querystring\n yield scrapy.Request(url, callback=self.parse_kimpton_data, meta={\"properties\": properties})\n\n def parse_kimpton_data(self, response):\n properties = response.meta[\"properties\"]\n script = response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first()\n if script:\n data = json.loads(script)\n else:\n data = {}\n if 'name' in data:\n properties.update({\n 'name': data[\"name\"],\n 'addr_full': data[\"address\"][\"streetAddress\"],\n 'city': data[\"address\"][\"addressLocality\"],\n 'state': data[\"address\"].get(\"addressRegion\"),\n 'postcode': data[\"address\"][\"postalCode\"],\n 'country': data[\"address\"].get(\"addressCountry\"),\n 'phone': data.get(\"telephone\"),\n 'website': data[\"url\"]\n })\n\n else:\n street_address = \" \".join(response.xpath('//span[@itemprop=\"streetAddress\"]/p/text()').extract())\n if not street_address:\n street_address = response.xpath('//span[@itemprop=\"streetAddress\"]/text()').extract_first()\n\n city = response.xpath('//span[@itemprop=\"addressLocality\"]/text()').extract_first()\n state = response.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first()\n\n properties.update({\n 'name': response.xpath('//meta[@property=\"og:title\"]/@content').extract_first(),\n 'addr_full': street_address.replace(u'\\u00a0', ' ').strip(', ') if street_address else None,\n 'city': city.replace(u'\\u00a0', ' ').strip(', ') if city else None,\n 'state': state.replace(u'\\u00a0', ' ') if state else None,\n 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n 'country': response.xpath('//span[@itemprop=\"addressCountry\"]/text()').extract_first(),\n 'phone': (response.xpath('//span[@itemprop=\"telephone\"]/text()').extract_first() or '').strip('| '),\n 'website': response.url,\n })\n\n yield GeojsonPointItem(**properties)\n\n def parse_regent(self, response):\n data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first())\n\n properties = {\n 'ref': \"_\".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),\n 'name': data[\"name\"],\n 'addr_full': data[\"address\"][\"streetAddress\"],\n 'city': data[\"address\"][\"addressLocality\"],\n 'state': data[\"address\"].get(\"addressRegion\"),\n 'postcode': data[\"address\"][\"postalCode\"],\n 'country': data[\"address\"][\"addressCountry\"],\n 'phone': data[\"telephone\"],\n 'lat': float(data[\"geo\"][\"latitude\"]),\n 'lon': float(data[\"geo\"][\"longitude\"]),\n 'website': response.url,\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse_crowne_plaza(self, response):\n address = response.xpath('//a[@class=\"hotel-home\"]/text()').extract_first().strip()\n\n address_parts = address.split('|')\n\n if len(address_parts) == 4: # international addresses\n addr_city, postcode, country, _ = address_parts\n state = ''\n else: # us addresses\n addr_city, state, postcode, country, _ = address_parts\n\n street_address = 
\",\".join(addr_city.split(',')[0:-1])\n city = addr_city.split(',')[-1]\n\n properties = {\n 'ref': \"_\".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),\n 'name': response.xpath('//meta[@property=\"og:title\"]/@content').extract_first(),\n 'addr_full': street_address.strip(),\n 'city': city.strip(),\n 'state': state.strip(),\n 'postcode': postcode.strip(),\n 'country': country.strip(),\n 'phone': response.xpath('//div[@class=\"new-hinfo-address\"]/p/a[2]/text()').extract_first(),\n 'lat': float(response.xpath('//meta[@property=\"place:location:latitude\"]/@content').extract_first()),\n 'lon': float(response.xpath('//meta[@property=\"place:location:longitude\"]/@content').extract_first()),\n 'website': response.url,\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse_candlewood_staybridge(self, response):\n if 'hoteldetail' not in response.url:\n # got redirected back to search page\n return\n\n street_address = \" \".join(response.xpath('//span[@itemprop=\"streetAddress\"]/p/text()').extract())\n if not street_address:\n street_address = response.xpath('//span[@itemprop=\"streetAddress\"]/text()').extract_first()\n\n region = response.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first().replace(u'\\u00a0',' ')\n\n match = re.search(r'([a-z]+)\\s(\\d+)\\s(.*)', region, re.IGNORECASE)\n if match:\n state, postcode, country = match.groups()\n else:\n state, postcode, country = None, None, region.strip()\n\n properties = {\n 'ref': \"_\".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),\n 'name': response.xpath('//meta[@property=\"og:title\"]/@content').extract_first(),\n 'addr_full': street_address.replace(u'\\u00a0', ' ').strip(', '),\n 'city': response.xpath('//span[@itemprop=\"addressLocality\"]/text()').extract_first().replace(u'\\u00a0', ' ').strip(', '),\n 'state': state,\n 'postcode': postcode,\n 'country': country,\n 'phone': response.xpath('//div[@class=\"booking\"]/a/text()').extract_first(),\n 'lat': float(response.xpath('//meta[@property=\"place:location:latitude\"]/@content').extract_first()),\n 'lon': float(response.xpath('//meta[@property=\"place:location:longitude\"]/@content').extract_first()),\n 'website': response.url,\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse_army_hotel(self, response):\n properties = {\n 'ref': \"_\".join(re.search(r'en/(.*)/(.*)/hoteldetail', response.url).groups()),\n 'name': response.xpath('//meta[@property=\"og:title\"]/@content').extract_first(),\n 'addr_full': response.xpath('//meta[@property=\"business:contact_data:street_address\"]/@content').extract_first(),\n 'city': response.xpath('//meta[@property=\"business:contact_data:locality\"]/@content').extract_first(),\n 'state': response.xpath('//meta[@property=\"business:contact_data:region\"]/@content').extract_first(),\n 'postcode': response.xpath('//meta[@property=\"business:contact_data:postal_code\"]/@content').extract_first(),\n 'country': response.xpath('//meta[@property=\"business:contact_data:country_name\"]/@content').extract_first(),\n 'phone': (response.xpath('//span[@title=\"Hotel Front Desk:\"]/span/text()').extract_first() or \"\").strip(),\n 'lat': float(response.xpath('//meta[@property=\"place:location:latitude\"]/@content').extract_first()),\n 'lon': float(response.xpath('//meta[@property=\"place:location:longitude\"]/@content').extract_first()),\n 'website': response.url,\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n\n hotel_parsers = {\n 'holidayinn': 
self.parse_hotel,\n 'crowneplaza': self.parse_crowne_plaza,\n 'holidayinnexpress': self.parse_hotel,\n 'hotelindigo': self.parse_hotel,\n 'candlewood': self.parse_candlewood_staybridge,\n 'staybridge': self.parse_candlewood_staybridge,\n 'holidayinnresorts': self.parse_hotel,\n 'intercontinental': self.parse_hotel,\n 'regent': self.parse_regent,\n 'hotels': self.parse_hotel, # vocos\n 'kimptonhotels': self.parse_kimpton,\n 'holidayinnclubvacations': self.parse_hotel,\n 'evenhotels': self.parse_hotel,\n 'avidhotels': self.parse_hotel,\n 'hualuxe': self.parse_hotel,\n 'armyhotels': self.parse_army_hotel\n }\n\n hotel_urls = response.xpath('//div[@class=\"hotelList\"]//div[contains(@class, \"hotelItem\")]//a[contains(@class, \"hotel-name\")]/@href').extract()\n\n if 'armyhotels' in response.url:\n hotel_urls = response.xpath('//div[@id=\"hotelListWrap\"]//a/@href').extract()\n\n if hotel_urls:\n for url in hotel_urls:\n hotel_type = re.search(r'ihg.com/(.*?)/', response.urljoin(url), re.IGNORECASE).group(1)\n\n yield scrapy.Request(response.urljoin(url),\n callback=hotel_parsers[hotel_type])\n\n else:\n urls = response.xpath('//li[@class=\"listingItem\"]/a/@href').extract()\n for url in urls:\n yield scrapy.Request(response.urljoin(url))\n", "path": "locations/spiders/ihg_hotels.py"}]}
| 3,137 | 871 |
gh_patches_debug_14095
|
rasdani/github-patches
|
git_diff
|
localstack__localstack-993
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Send SMS via SNS fails with 502
Hi, we are trying to mock SMS sending via SNS.
```
aws --endpoint-url=http://localhost:4575 --region=us-east-1 sns publish --message "lorem ipsum" --phone-number 12345
```
Checking the localstack image I can see a Python error in the logs:
```
2018-10-30T13:46:19:ERROR:localstack.services.generic_proxy: Error forwarding request: None Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 181, in forward
path=path, data=data, headers=forward_headers)
File "/opt/code/localstack/localstack/services/sns/sns_listener.py", line 68, in forward_request
for subscriber in SNS_SUBSCRIPTIONS[topic_arn]:
KeyError: None
```
Checking the source code I can see that topic_arn is None because it is not provided in the SNS API call. If I provide topic_arn, everything works fine:
```
aws --endpoint-url=http://localhost:4575 --region=us-east-1 sns publish --message "lorem ipsum" --phone-number 12345 --topic-arn abcd
{
"MessageId": "245292e5"
}
```
Can you guys please check that?
Thanks
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `localstack/services/sns/sns_listener.py`
Content:
```
1 import ast
2 import json
3 import logging
4 import requests
5 import uuid
6 import xmltodict
7 from requests.models import Response
8 from six.moves.urllib import parse as urlparse
9 from localstack.utils.aws import aws_stack
10 from localstack.utils.common import short_uid, to_str
11 from localstack.services.awslambda import lambda_api
12 from localstack.services.generic_proxy import ProxyListener
13
14 # mappings for SNS topic subscriptions
15 SNS_SUBSCRIPTIONS = {}
16
17 # set up logger
18 LOGGER = logging.getLogger(__name__)
19
20
21 class ProxyListenerSNS(ProxyListener):
22
23 def forward_request(self, method, path, data, headers):
24
25 # check region
26 try:
27 aws_stack.check_valid_region(headers)
28 except Exception as e:
29 return make_error(message=str(e), code=400)
30
31 if method == 'POST' and path == '/':
32 req_data = urlparse.parse_qs(to_str(data))
33 req_action = req_data['Action'][0]
34 topic_arn = req_data.get('TargetArn') or req_data.get('TopicArn')
35
36 if topic_arn:
37 topic_arn = topic_arn[0]
38 do_create_topic(topic_arn)
39
40 if req_action == 'SetSubscriptionAttributes':
41 sub = get_subscription_by_arn(req_data['SubscriptionArn'][0])
42 if not sub:
43 return make_error(message='Unable to find subscription for given ARN', code=400)
44 attr_name = req_data['AttributeName'][0]
45 attr_value = req_data['AttributeValue'][0]
46 sub[attr_name] = attr_value
47 return make_response(req_action)
48 elif req_action == 'GetSubscriptionAttributes':
49 sub = get_subscription_by_arn(req_data['SubscriptionArn'][0])
50 if not sub:
51 return make_error(message='Unable to find subscription for given ARN', code=400)
52 content = '<Attributes>'
53 for key, value in sub.items():
54 content += '<entry><key>%s</key><value>%s</value></entry>\n' % (key, value)
55 content += '</Attributes>'
56 return make_response(req_action, content=content)
57 elif req_action == 'Subscribe':
58 if 'Endpoint' not in req_data:
59 return make_error(message='Endpoint not specified in subscription', code=400)
60 elif req_action == 'Unsubscribe':
61 if 'SubscriptionArn' not in req_data:
62 return make_error(message='SubscriptionArn not specified in unsubscribe request', code=400)
63 do_unsubscribe(req_data.get('SubscriptionArn')[0])
64
65 elif req_action == 'Publish':
66 message = req_data['Message'][0]
67 sqs_client = aws_stack.connect_to_service('sqs')
68 for subscriber in SNS_SUBSCRIPTIONS[topic_arn]:
69 filter_policy = json.loads(subscriber.get('FilterPolicy', '{}'))
70 message_attributes = get_message_attributes(req_data)
71 if check_filter_policy(filter_policy, message_attributes):
72 if subscriber['Protocol'] == 'sqs':
73 endpoint = subscriber['Endpoint']
74 if 'sqs_queue_url' in subscriber:
75 queue_url = subscriber.get('sqs_queue_url')
76 elif '://' in endpoint:
77 queue_url = endpoint
78 else:
79 queue_name = endpoint.split(':')[5]
80 queue_url = aws_stack.get_sqs_queue_url(queue_name)
81 subscriber['sqs_queue_url'] = queue_url
82 try:
83 sqs_client.send_message(
84 QueueUrl=queue_url,
85 MessageBody=create_sns_message_body(subscriber, req_data)
86 )
87 except Exception as exc:
88 return make_error(message=str(exc), code=400)
89 elif subscriber['Protocol'] == 'lambda':
90 lambda_api.process_sns_notification(
91 subscriber['Endpoint'],
92 topic_arn, message, subject=req_data.get('Subject', [None])[0]
93 )
94 elif subscriber['Protocol'] in ['http', 'https']:
95 try:
96 message_body = create_sns_message_body(subscriber, req_data)
97 except Exception as exc:
98 return make_error(message=str(exc), code=400)
99 requests.post(
100 subscriber['Endpoint'],
101 headers={
102 'Content-Type': 'text/plain',
103 'x-amz-sns-message-type': 'Notification'
104 },
105 data=message_body
106 )
107 else:
108 LOGGER.warning('Unexpected protocol "%s" for SNS subscription' % subscriber['Protocol'])
109 # return response here because we do not want the request to be forwarded to SNS
110 return make_response(req_action)
111
112 return True
113
114 def return_response(self, method, path, data, headers, response):
115 # This method is executed by the proxy after we've already received a
116 # response from the backend, hence we can utilize the "response" variable here
117 if method == 'POST' and path == '/':
118 req_data = urlparse.parse_qs(to_str(data))
119 req_action = req_data['Action'][0]
120 if req_action == 'Subscribe' and response.status_code < 400:
121 response_data = xmltodict.parse(response.content)
122 topic_arn = (req_data.get('TargetArn') or req_data.get('TopicArn'))[0]
123 sub_arn = response_data['SubscribeResponse']['SubscribeResult']['SubscriptionArn']
124 do_subscribe(topic_arn, req_data['Endpoint'][0], req_data['Protocol'][0], sub_arn)
125
126
127 # instantiate listener
128 UPDATE_SNS = ProxyListenerSNS()
129
130
131 def do_create_topic(topic_arn):
132 if topic_arn not in SNS_SUBSCRIPTIONS:
133 SNS_SUBSCRIPTIONS[topic_arn] = []
134
135
136 def do_subscribe(topic_arn, endpoint, protocol, subscription_arn):
137 subscription = {
138 # http://docs.aws.amazon.com/cli/latest/reference/sns/get-subscription-attributes.html
139 'TopicArn': topic_arn,
140 'Endpoint': endpoint,
141 'Protocol': protocol,
142 'SubscriptionArn': subscription_arn,
143 'RawMessageDelivery': 'false'
144 }
145 SNS_SUBSCRIPTIONS[topic_arn].append(subscription)
146
147
148 def do_unsubscribe(subscription_arn):
149 for topic_arn in SNS_SUBSCRIPTIONS:
150 SNS_SUBSCRIPTIONS[topic_arn] = [
151 sub for sub in SNS_SUBSCRIPTIONS[topic_arn]
152 if sub['SubscriptionArn'] != subscription_arn
153 ]
154
155
156 # ---------------
157 # HELPER METHODS
158 # ---------------
159
160 def get_topic_by_arn(topic_arn):
161 if topic_arn in SNS_SUBSCRIPTIONS:
162 return SNS_SUBSCRIPTIONS[topic_arn]
163 else:
164 return None
165
166
167 def get_subscription_by_arn(sub_arn):
168 # TODO maintain separate map instead of traversing all items
169 for key, subscriptions in SNS_SUBSCRIPTIONS.items():
170 for sub in subscriptions:
171 if sub['SubscriptionArn'] == sub_arn:
172 return sub
173
174
175 def make_response(op_name, content=''):
176 response = Response()
177 if not content:
178 content = '<MessageId>%s</MessageId>' % short_uid()
179 response._content = """<{op_name}Response xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
180 <{op_name}Result>
181 {content}
182 </{op_name}Result>
183 <ResponseMetadata><RequestId>{req_id}</RequestId></ResponseMetadata>
184 </{op_name}Response>""".format(op_name=op_name, content=content, req_id=short_uid())
185 response.status_code = 200
186 return response
187
188
189 def make_error(message, code=400, code_string='InvalidParameter'):
190 response = Response()
191 response._content = """<ErrorResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/"><Error>
192 <Type>Sender</Type>
193 <Code>{code_string}</Code>
194 <Message>{message}</Message>
195 </Error><RequestId>{req_id}</RequestId>
196 </ErrorResponse>""".format(message=message, code_string=code_string, req_id=short_uid())
197 response.status_code = code
198 return response
199
200
201 def create_sns_message_body(subscriber, req_data):
202 message = req_data['Message'][0]
203 subject = req_data.get('Subject', [None])[0]
204 protocol = subscriber['Protocol']
205
206 if subscriber['RawMessageDelivery'] == 'true':
207 return message
208
209 if req_data.get('MessageStructure') == ['json']:
210 message = json.loads(message)
211 try:
212 message = message.get(protocol, message['default'])
213 except KeyError:
214 raise Exception("Unable to find 'default' key in message payload")
215
216 data = {}
217 data['MessageId'] = str(uuid.uuid4())
218 data['Type'] = 'Notification'
219 data['Message'] = message
220 data['TopicArn'] = subscriber['TopicArn']
221 if subject is not None:
222 data['Subject'] = subject
223 attributes = get_message_attributes(req_data)
224 if attributes:
225 data['MessageAttributes'] = attributes
226 return json.dumps(data)
227
228
229 def get_message_attributes(req_data):
230 attributes = {}
231 x = 1
232 while True:
233 name = req_data.get('MessageAttributes.entry.' + str(x) + '.Name', [None])[0]
234 if name is not None:
235 attribute = {}
236 attribute['Type'] = req_data.get('MessageAttributes.entry.' + str(x) + '.Value.DataType', [None])[0]
237 string_value = req_data.get('MessageAttributes.entry.' + str(x) + '.Value.StringValue', [None])[0]
238 binary_value = req_data.get('MessageAttributes.entry.' + str(x) + '.Value.BinaryValue', [None])[0]
239 if string_value is not None:
240 attribute['Value'] = string_value
241 elif binary_value is not None:
242 attribute['Value'] = binary_value
243
244 attributes[name] = attribute
245 x += 1
246 else:
247 break
248
249 return attributes
250
251
252 def is_number(x):
253 try:
254 float(x)
255 return True
256 except ValueError:
257 return False
258
259
260 def evaluate_numeric_condition(conditions, value):
261 if not is_number(value):
262 return False
263
264 for i in range(0, len(conditions), 2):
265 operator = conditions[i]
266 operand = conditions[i + 1]
267
268 if operator == '=':
269 if value != operand:
270 return False
271 elif operator == '>':
272 if value <= operand:
273 return False
274 elif operator == '<':
275 if value >= operand:
276 return False
277 elif operator == '>=':
278 if value < operand:
279 return False
280 elif operator == '<=':
281 if value > operand:
282 return False
283
284 return True
285
286
287 def evaluate_condition(value, condition):
288 if type(condition) is not dict:
289 return value == condition
290 elif condition.get('anything-but'):
291 return value not in condition.get('anything-but')
292 elif condition.get('prefix'):
293 prefix = condition.get('prefix')
294 return value.startswith(prefix)
295 elif condition.get('numeric'):
296 return evaluate_numeric_condition(condition.get('numeric'), value)
297
298 return False
299
300
301 def evaluate_filter_policy_conditions(conditions, attribute):
302 if type(conditions) is not list:
303 conditions = [conditions]
304
305 if attribute['Type'] == 'String.Array':
306 values = ast.literal_eval(attribute['Value'])
307 for value in values:
308 for condition in conditions:
309 if evaluate_condition(value, condition):
310 return True
311 else:
312 for condition in conditions:
313 if evaluate_condition(attribute['Value'], condition):
314 return True
315
316 return False
317
318
319 def check_filter_policy(filter_policy, message_attributes):
320 if not filter_policy:
321 return True
322
323 for criteria in filter_policy:
324 conditions = filter_policy.get(criteria)
325 attribute = message_attributes.get(criteria)
326
327 if attribute is None:
328 return False
329
330 if evaluate_filter_policy_conditions(conditions, attribute) is False:
331 return False
332
333 return True
334
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/localstack/services/sns/sns_listener.py b/localstack/services/sns/sns_listener.py
--- a/localstack/services/sns/sns_listener.py
+++ b/localstack/services/sns/sns_listener.py
@@ -65,7 +65,7 @@
elif req_action == 'Publish':
message = req_data['Message'][0]
sqs_client = aws_stack.connect_to_service('sqs')
- for subscriber in SNS_SUBSCRIPTIONS[topic_arn]:
+ for subscriber in SNS_SUBSCRIPTIONS.get(topic_arn, []):
filter_policy = json.loads(subscriber.get('FilterPolicy', '{}'))
message_attributes = get_message_attributes(req_data)
if check_filter_policy(filter_policy, message_attributes):
|
{"golden_diff": "diff --git a/localstack/services/sns/sns_listener.py b/localstack/services/sns/sns_listener.py\n--- a/localstack/services/sns/sns_listener.py\n+++ b/localstack/services/sns/sns_listener.py\n@@ -65,7 +65,7 @@\n elif req_action == 'Publish':\n message = req_data['Message'][0]\n sqs_client = aws_stack.connect_to_service('sqs')\n- for subscriber in SNS_SUBSCRIPTIONS[topic_arn]:\n+ for subscriber in SNS_SUBSCRIPTIONS.get(topic_arn, []):\n filter_policy = json.loads(subscriber.get('FilterPolicy', '{}'))\n message_attributes = get_message_attributes(req_data)\n if check_filter_policy(filter_policy, message_attributes):\n", "issue": "Send SMS via SNS fails with 502\nHi, we are trying to mock the SMS sending via SMS.\r\n```\r\naws --endpoint-url=http://localhost:4575 --region=us-east-1 sns publish --message \"lorem ipsum\" --phone-number 12345\r\n```\r\nChecking the localstack image I can see a Python error in the logs:\r\n```\r\n2018-10-30T13:46:19:ERROR:localstack.services.generic_proxy: Error forwarding request: None Traceback (most recent call last):\r\n File \"/opt/code/localstack/localstack/services/generic_proxy.py\", line 181, in forward\r\n path=path, data=data, headers=forward_headers)\r\n File \"/opt/code/localstack/localstack/services/sns/sns_listener.py\", line 68, in forward_request\r\n for subscriber in SNS_SUBSCRIPTIONS[topic_arn]:\r\nKeyError: None\r\n```\r\nChecking the source code I can see that topic_arn is None because it is not informed in the SNS API Call. If I inform topic_arn everything works fine:\r\n```\r\naws --endpoint-url=http://localhost:4575 --region=us-east-1 sns publish --message \"lorem ipsum\" --phone-number 12345 --topic-arn abcd\r\n{\r\n \"MessageId\": \"245292e5\"\r\n}\r\n```\r\n\r\nCan you guys please check that?\r\nThanks\n", "before_files": [{"content": "import ast\nimport json\nimport logging\nimport requests\nimport uuid\nimport xmltodict\nfrom requests.models import Response\nfrom six.moves.urllib import parse as urlparse\nfrom localstack.utils.aws import aws_stack\nfrom localstack.utils.common import short_uid, to_str\nfrom localstack.services.awslambda import lambda_api\nfrom localstack.services.generic_proxy import ProxyListener\n\n# mappings for SNS topic subscriptions\nSNS_SUBSCRIPTIONS = {}\n\n# set up logger\nLOGGER = logging.getLogger(__name__)\n\n\nclass ProxyListenerSNS(ProxyListener):\n\n def forward_request(self, method, path, data, headers):\n\n # check region\n try:\n aws_stack.check_valid_region(headers)\n except Exception as e:\n return make_error(message=str(e), code=400)\n\n if method == 'POST' and path == '/':\n req_data = urlparse.parse_qs(to_str(data))\n req_action = req_data['Action'][0]\n topic_arn = req_data.get('TargetArn') or req_data.get('TopicArn')\n\n if topic_arn:\n topic_arn = topic_arn[0]\n do_create_topic(topic_arn)\n\n if req_action == 'SetSubscriptionAttributes':\n sub = get_subscription_by_arn(req_data['SubscriptionArn'][0])\n if not sub:\n return make_error(message='Unable to find subscription for given ARN', code=400)\n attr_name = req_data['AttributeName'][0]\n attr_value = req_data['AttributeValue'][0]\n sub[attr_name] = attr_value\n return make_response(req_action)\n elif req_action == 'GetSubscriptionAttributes':\n sub = get_subscription_by_arn(req_data['SubscriptionArn'][0])\n if not sub:\n return make_error(message='Unable to find subscription for given ARN', code=400)\n content = '<Attributes>'\n for key, value in sub.items():\n content += '<entry><key>%s</key><value>%s</value></entry>\\n' % 
(key, value)\n content += '</Attributes>'\n return make_response(req_action, content=content)\n elif req_action == 'Subscribe':\n if 'Endpoint' not in req_data:\n return make_error(message='Endpoint not specified in subscription', code=400)\n elif req_action == 'Unsubscribe':\n if 'SubscriptionArn' not in req_data:\n return make_error(message='SubscriptionArn not specified in unsubscribe request', code=400)\n do_unsubscribe(req_data.get('SubscriptionArn')[0])\n\n elif req_action == 'Publish':\n message = req_data['Message'][0]\n sqs_client = aws_stack.connect_to_service('sqs')\n for subscriber in SNS_SUBSCRIPTIONS[topic_arn]:\n filter_policy = json.loads(subscriber.get('FilterPolicy', '{}'))\n message_attributes = get_message_attributes(req_data)\n if check_filter_policy(filter_policy, message_attributes):\n if subscriber['Protocol'] == 'sqs':\n endpoint = subscriber['Endpoint']\n if 'sqs_queue_url' in subscriber:\n queue_url = subscriber.get('sqs_queue_url')\n elif '://' in endpoint:\n queue_url = endpoint\n else:\n queue_name = endpoint.split(':')[5]\n queue_url = aws_stack.get_sqs_queue_url(queue_name)\n subscriber['sqs_queue_url'] = queue_url\n try:\n sqs_client.send_message(\n QueueUrl=queue_url,\n MessageBody=create_sns_message_body(subscriber, req_data)\n )\n except Exception as exc:\n return make_error(message=str(exc), code=400)\n elif subscriber['Protocol'] == 'lambda':\n lambda_api.process_sns_notification(\n subscriber['Endpoint'],\n topic_arn, message, subject=req_data.get('Subject', [None])[0]\n )\n elif subscriber['Protocol'] in ['http', 'https']:\n try:\n message_body = create_sns_message_body(subscriber, req_data)\n except Exception as exc:\n return make_error(message=str(exc), code=400)\n requests.post(\n subscriber['Endpoint'],\n headers={\n 'Content-Type': 'text/plain',\n 'x-amz-sns-message-type': 'Notification'\n },\n data=message_body\n )\n else:\n LOGGER.warning('Unexpected protocol \"%s\" for SNS subscription' % subscriber['Protocol'])\n # return response here because we do not want the request to be forwarded to SNS\n return make_response(req_action)\n\n return True\n\n def return_response(self, method, path, data, headers, response):\n # This method is executed by the proxy after we've already received a\n # response from the backend, hence we can utilize the \"response\" variable here\n if method == 'POST' and path == '/':\n req_data = urlparse.parse_qs(to_str(data))\n req_action = req_data['Action'][0]\n if req_action == 'Subscribe' and response.status_code < 400:\n response_data = xmltodict.parse(response.content)\n topic_arn = (req_data.get('TargetArn') or req_data.get('TopicArn'))[0]\n sub_arn = response_data['SubscribeResponse']['SubscribeResult']['SubscriptionArn']\n do_subscribe(topic_arn, req_data['Endpoint'][0], req_data['Protocol'][0], sub_arn)\n\n\n# instantiate listener\nUPDATE_SNS = ProxyListenerSNS()\n\n\ndef do_create_topic(topic_arn):\n if topic_arn not in SNS_SUBSCRIPTIONS:\n SNS_SUBSCRIPTIONS[topic_arn] = []\n\n\ndef do_subscribe(topic_arn, endpoint, protocol, subscription_arn):\n subscription = {\n # http://docs.aws.amazon.com/cli/latest/reference/sns/get-subscription-attributes.html\n 'TopicArn': topic_arn,\n 'Endpoint': endpoint,\n 'Protocol': protocol,\n 'SubscriptionArn': subscription_arn,\n 'RawMessageDelivery': 'false'\n }\n SNS_SUBSCRIPTIONS[topic_arn].append(subscription)\n\n\ndef do_unsubscribe(subscription_arn):\n for topic_arn in SNS_SUBSCRIPTIONS:\n SNS_SUBSCRIPTIONS[topic_arn] = [\n sub for sub in SNS_SUBSCRIPTIONS[topic_arn]\n if 
sub['SubscriptionArn'] != subscription_arn\n ]\n\n\n# ---------------\n# HELPER METHODS\n# ---------------\n\ndef get_topic_by_arn(topic_arn):\n if topic_arn in SNS_SUBSCRIPTIONS:\n return SNS_SUBSCRIPTIONS[topic_arn]\n else:\n return None\n\n\ndef get_subscription_by_arn(sub_arn):\n # TODO maintain separate map instead of traversing all items\n for key, subscriptions in SNS_SUBSCRIPTIONS.items():\n for sub in subscriptions:\n if sub['SubscriptionArn'] == sub_arn:\n return sub\n\n\ndef make_response(op_name, content=''):\n response = Response()\n if not content:\n content = '<MessageId>%s</MessageId>' % short_uid()\n response._content = \"\"\"<{op_name}Response xmlns=\"http://sns.amazonaws.com/doc/2010-03-31/\">\n <{op_name}Result>\n {content}\n </{op_name}Result>\n <ResponseMetadata><RequestId>{req_id}</RequestId></ResponseMetadata>\n </{op_name}Response>\"\"\".format(op_name=op_name, content=content, req_id=short_uid())\n response.status_code = 200\n return response\n\n\ndef make_error(message, code=400, code_string='InvalidParameter'):\n response = Response()\n response._content = \"\"\"<ErrorResponse xmlns=\"http://sns.amazonaws.com/doc/2010-03-31/\"><Error>\n <Type>Sender</Type>\n <Code>{code_string}</Code>\n <Message>{message}</Message>\n </Error><RequestId>{req_id}</RequestId>\n </ErrorResponse>\"\"\".format(message=message, code_string=code_string, req_id=short_uid())\n response.status_code = code\n return response\n\n\ndef create_sns_message_body(subscriber, req_data):\n message = req_data['Message'][0]\n subject = req_data.get('Subject', [None])[0]\n protocol = subscriber['Protocol']\n\n if subscriber['RawMessageDelivery'] == 'true':\n return message\n\n if req_data.get('MessageStructure') == ['json']:\n message = json.loads(message)\n try:\n message = message.get(protocol, message['default'])\n except KeyError:\n raise Exception(\"Unable to find 'default' key in message payload\")\n\n data = {}\n data['MessageId'] = str(uuid.uuid4())\n data['Type'] = 'Notification'\n data['Message'] = message\n data['TopicArn'] = subscriber['TopicArn']\n if subject is not None:\n data['Subject'] = subject\n attributes = get_message_attributes(req_data)\n if attributes:\n data['MessageAttributes'] = attributes\n return json.dumps(data)\n\n\ndef get_message_attributes(req_data):\n attributes = {}\n x = 1\n while True:\n name = req_data.get('MessageAttributes.entry.' + str(x) + '.Name', [None])[0]\n if name is not None:\n attribute = {}\n attribute['Type'] = req_data.get('MessageAttributes.entry.' + str(x) + '.Value.DataType', [None])[0]\n string_value = req_data.get('MessageAttributes.entry.' + str(x) + '.Value.StringValue', [None])[0]\n binary_value = req_data.get('MessageAttributes.entry.' 
+ str(x) + '.Value.BinaryValue', [None])[0]\n if string_value is not None:\n attribute['Value'] = string_value\n elif binary_value is not None:\n attribute['Value'] = binary_value\n\n attributes[name] = attribute\n x += 1\n else:\n break\n\n return attributes\n\n\ndef is_number(x):\n try:\n float(x)\n return True\n except ValueError:\n return False\n\n\ndef evaluate_numeric_condition(conditions, value):\n if not is_number(value):\n return False\n\n for i in range(0, len(conditions), 2):\n operator = conditions[i]\n operand = conditions[i + 1]\n\n if operator == '=':\n if value != operand:\n return False\n elif operator == '>':\n if value <= operand:\n return False\n elif operator == '<':\n if value >= operand:\n return False\n elif operator == '>=':\n if value < operand:\n return False\n elif operator == '<=':\n if value > operand:\n return False\n\n return True\n\n\ndef evaluate_condition(value, condition):\n if type(condition) is not dict:\n return value == condition\n elif condition.get('anything-but'):\n return value not in condition.get('anything-but')\n elif condition.get('prefix'):\n prefix = condition.get('prefix')\n return value.startswith(prefix)\n elif condition.get('numeric'):\n return evaluate_numeric_condition(condition.get('numeric'), value)\n\n return False\n\n\ndef evaluate_filter_policy_conditions(conditions, attribute):\n if type(conditions) is not list:\n conditions = [conditions]\n\n if attribute['Type'] == 'String.Array':\n values = ast.literal_eval(attribute['Value'])\n for value in values:\n for condition in conditions:\n if evaluate_condition(value, condition):\n return True\n else:\n for condition in conditions:\n if evaluate_condition(attribute['Value'], condition):\n return True\n\n return False\n\n\ndef check_filter_policy(filter_policy, message_attributes):\n if not filter_policy:\n return True\n\n for criteria in filter_policy:\n conditions = filter_policy.get(criteria)\n attribute = message_attributes.get(criteria)\n\n if attribute is None:\n return False\n\n if evaluate_filter_policy_conditions(conditions, attribute) is False:\n return False\n\n return True\n", "path": "localstack/services/sns/sns_listener.py"}], "after_files": [{"content": "import ast\nimport json\nimport logging\nimport requests\nimport uuid\nimport xmltodict\nfrom requests.models import Response\nfrom six.moves.urllib import parse as urlparse\nfrom localstack.utils.aws import aws_stack\nfrom localstack.utils.common import short_uid, to_str\nfrom localstack.services.awslambda import lambda_api\nfrom localstack.services.generic_proxy import ProxyListener\n\n# mappings for SNS topic subscriptions\nSNS_SUBSCRIPTIONS = {}\n\n# set up logger\nLOGGER = logging.getLogger(__name__)\n\n\nclass ProxyListenerSNS(ProxyListener):\n\n def forward_request(self, method, path, data, headers):\n\n # check region\n try:\n aws_stack.check_valid_region(headers)\n except Exception as e:\n return make_error(message=str(e), code=400)\n\n if method == 'POST' and path == '/':\n req_data = urlparse.parse_qs(to_str(data))\n req_action = req_data['Action'][0]\n topic_arn = req_data.get('TargetArn') or req_data.get('TopicArn')\n\n if topic_arn:\n topic_arn = topic_arn[0]\n do_create_topic(topic_arn)\n\n if req_action == 'SetSubscriptionAttributes':\n sub = get_subscription_by_arn(req_data['SubscriptionArn'][0])\n if not sub:\n return make_error(message='Unable to find subscription for given ARN', code=400)\n attr_name = req_data['AttributeName'][0]\n attr_value = req_data['AttributeValue'][0]\n sub[attr_name] = 
attr_value\n return make_response(req_action)\n elif req_action == 'GetSubscriptionAttributes':\n sub = get_subscription_by_arn(req_data['SubscriptionArn'][0])\n if not sub:\n return make_error(message='Unable to find subscription for given ARN', code=400)\n content = '<Attributes>'\n for key, value in sub.items():\n content += '<entry><key>%s</key><value>%s</value></entry>\\n' % (key, value)\n content += '</Attributes>'\n return make_response(req_action, content=content)\n elif req_action == 'Subscribe':\n if 'Endpoint' not in req_data:\n return make_error(message='Endpoint not specified in subscription', code=400)\n elif req_action == 'Unsubscribe':\n if 'SubscriptionArn' not in req_data:\n return make_error(message='SubscriptionArn not specified in unsubscribe request', code=400)\n do_unsubscribe(req_data.get('SubscriptionArn')[0])\n\n elif req_action == 'Publish':\n message = req_data['Message'][0]\n sqs_client = aws_stack.connect_to_service('sqs')\n for subscriber in SNS_SUBSCRIPTIONS.get(topic_arn, []):\n filter_policy = json.loads(subscriber.get('FilterPolicy', '{}'))\n message_attributes = get_message_attributes(req_data)\n if check_filter_policy(filter_policy, message_attributes):\n if subscriber['Protocol'] == 'sqs':\n endpoint = subscriber['Endpoint']\n if 'sqs_queue_url' in subscriber:\n queue_url = subscriber.get('sqs_queue_url')\n elif '://' in endpoint:\n queue_url = endpoint\n else:\n queue_name = endpoint.split(':')[5]\n queue_url = aws_stack.get_sqs_queue_url(queue_name)\n subscriber['sqs_queue_url'] = queue_url\n try:\n sqs_client.send_message(\n QueueUrl=queue_url,\n MessageBody=create_sns_message_body(subscriber, req_data)\n )\n except Exception as exc:\n return make_error(message=str(exc), code=400)\n elif subscriber['Protocol'] == 'lambda':\n lambda_api.process_sns_notification(\n subscriber['Endpoint'],\n topic_arn, message, subject=req_data.get('Subject', [None])[0]\n )\n elif subscriber['Protocol'] in ['http', 'https']:\n try:\n message_body = create_sns_message_body(subscriber, req_data)\n except Exception as exc:\n return make_error(message=str(exc), code=400)\n requests.post(\n subscriber['Endpoint'],\n headers={\n 'Content-Type': 'text/plain',\n 'x-amz-sns-message-type': 'Notification'\n },\n data=message_body\n )\n else:\n LOGGER.warning('Unexpected protocol \"%s\" for SNS subscription' % subscriber['Protocol'])\n # return response here because we do not want the request to be forwarded to SNS\n return make_response(req_action)\n\n return True\n\n def return_response(self, method, path, data, headers, response):\n # This method is executed by the proxy after we've already received a\n # response from the backend, hence we can utilize the \"response\" variable here\n if method == 'POST' and path == '/':\n req_data = urlparse.parse_qs(to_str(data))\n req_action = req_data['Action'][0]\n if req_action == 'Subscribe' and response.status_code < 400:\n response_data = xmltodict.parse(response.content)\n topic_arn = (req_data.get('TargetArn') or req_data.get('TopicArn'))[0]\n sub_arn = response_data['SubscribeResponse']['SubscribeResult']['SubscriptionArn']\n do_subscribe(topic_arn, req_data['Endpoint'][0], req_data['Protocol'][0], sub_arn)\n\n\n# instantiate listener\nUPDATE_SNS = ProxyListenerSNS()\n\n\ndef do_create_topic(topic_arn):\n if topic_arn not in SNS_SUBSCRIPTIONS:\n SNS_SUBSCRIPTIONS[topic_arn] = []\n\n\ndef do_subscribe(topic_arn, endpoint, protocol, subscription_arn):\n subscription = {\n # 
http://docs.aws.amazon.com/cli/latest/reference/sns/get-subscription-attributes.html\n 'TopicArn': topic_arn,\n 'Endpoint': endpoint,\n 'Protocol': protocol,\n 'SubscriptionArn': subscription_arn,\n 'RawMessageDelivery': 'false'\n }\n SNS_SUBSCRIPTIONS[topic_arn].append(subscription)\n\n\ndef do_unsubscribe(subscription_arn):\n for topic_arn in SNS_SUBSCRIPTIONS:\n SNS_SUBSCRIPTIONS[topic_arn] = [\n sub for sub in SNS_SUBSCRIPTIONS[topic_arn]\n if sub['SubscriptionArn'] != subscription_arn\n ]\n\n\n# ---------------\n# HELPER METHODS\n# ---------------\n\ndef get_topic_by_arn(topic_arn):\n if topic_arn in SNS_SUBSCRIPTIONS:\n return SNS_SUBSCRIPTIONS[topic_arn]\n else:\n return None\n\n\ndef get_subscription_by_arn(sub_arn):\n # TODO maintain separate map instead of traversing all items\n for key, subscriptions in SNS_SUBSCRIPTIONS.items():\n for sub in subscriptions:\n if sub['SubscriptionArn'] == sub_arn:\n return sub\n\n\ndef make_response(op_name, content=''):\n response = Response()\n if not content:\n content = '<MessageId>%s</MessageId>' % short_uid()\n response._content = \"\"\"<{op_name}Response xmlns=\"http://sns.amazonaws.com/doc/2010-03-31/\">\n <{op_name}Result>\n {content}\n </{op_name}Result>\n <ResponseMetadata><RequestId>{req_id}</RequestId></ResponseMetadata>\n </{op_name}Response>\"\"\".format(op_name=op_name, content=content, req_id=short_uid())\n response.status_code = 200\n return response\n\n\ndef make_error(message, code=400, code_string='InvalidParameter'):\n response = Response()\n response._content = \"\"\"<ErrorResponse xmlns=\"http://sns.amazonaws.com/doc/2010-03-31/\"><Error>\n <Type>Sender</Type>\n <Code>{code_string}</Code>\n <Message>{message}</Message>\n </Error><RequestId>{req_id}</RequestId>\n </ErrorResponse>\"\"\".format(message=message, code_string=code_string, req_id=short_uid())\n response.status_code = code\n return response\n\n\ndef create_sns_message_body(subscriber, req_data):\n message = req_data['Message'][0]\n subject = req_data.get('Subject', [None])[0]\n protocol = subscriber['Protocol']\n\n if subscriber['RawMessageDelivery'] == 'true':\n return message\n\n if req_data.get('MessageStructure') == ['json']:\n message = json.loads(message)\n try:\n message = message.get(protocol, message['default'])\n except KeyError:\n raise Exception(\"Unable to find 'default' key in message payload\")\n\n data = {}\n data['MessageId'] = str(uuid.uuid4())\n data['Type'] = 'Notification'\n data['Message'] = message\n data['TopicArn'] = subscriber['TopicArn']\n if subject is not None:\n data['Subject'] = subject\n attributes = get_message_attributes(req_data)\n if attributes:\n data['MessageAttributes'] = attributes\n return json.dumps(data)\n\n\ndef get_message_attributes(req_data):\n attributes = {}\n x = 1\n while True:\n name = req_data.get('MessageAttributes.entry.' + str(x) + '.Name', [None])[0]\n if name is not None:\n attribute = {}\n attribute['Type'] = req_data.get('MessageAttributes.entry.' + str(x) + '.Value.DataType', [None])[0]\n string_value = req_data.get('MessageAttributes.entry.' + str(x) + '.Value.StringValue', [None])[0]\n binary_value = req_data.get('MessageAttributes.entry.' 
+ str(x) + '.Value.BinaryValue', [None])[0]\n if string_value is not None:\n attribute['Value'] = string_value\n elif binary_value is not None:\n attribute['Value'] = binary_value\n\n attributes[name] = attribute\n x += 1\n else:\n break\n\n return attributes\n\n\ndef is_number(x):\n try:\n float(x)\n return True\n except ValueError:\n return False\n\n\ndef evaluate_numeric_condition(conditions, value):\n if not is_number(value):\n return False\n\n for i in range(0, len(conditions), 2):\n operator = conditions[i]\n operand = conditions[i + 1]\n\n if operator == '=':\n if value != operand:\n return False\n elif operator == '>':\n if value <= operand:\n return False\n elif operator == '<':\n if value >= operand:\n return False\n elif operator == '>=':\n if value < operand:\n return False\n elif operator == '<=':\n if value > operand:\n return False\n\n return True\n\n\ndef evaluate_condition(value, condition):\n if type(condition) is not dict:\n return value == condition\n elif condition.get('anything-but'):\n return value not in condition.get('anything-but')\n elif condition.get('prefix'):\n prefix = condition.get('prefix')\n return value.startswith(prefix)\n elif condition.get('numeric'):\n return evaluate_numeric_condition(condition.get('numeric'), value)\n\n return False\n\n\ndef evaluate_filter_policy_conditions(conditions, attribute):\n if type(conditions) is not list:\n conditions = [conditions]\n\n if attribute['Type'] == 'String.Array':\n values = ast.literal_eval(attribute['Value'])\n for value in values:\n for condition in conditions:\n if evaluate_condition(value, condition):\n return True\n else:\n for condition in conditions:\n if evaluate_condition(attribute['Value'], condition):\n return True\n\n return False\n\n\ndef check_filter_policy(filter_policy, message_attributes):\n if not filter_policy:\n return True\n\n for criteria in filter_policy:\n conditions = filter_policy.get(criteria)\n attribute = message_attributes.get(criteria)\n\n if attribute is None:\n return False\n\n if evaluate_filter_policy_conditions(conditions, attribute) is False:\n return False\n\n return True\n", "path": "localstack/services/sns/sns_listener.py"}]}
| 4,052 | 160 |
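A minimal, self-contained sketch of the lookup change in the localstack record above. The registry name and subscriber fields are illustrative stand-ins rather than the project's real data; the point is only that indexing a dict with a missing (None) topic ARN raises KeyError, while dict.get with a default simply yields no subscribers:

```python
# Illustrative stand-in for a dict-of-lists registry such as SNS_SUBSCRIPTIONS.
subscriptions = {
    "arn:aws:sns:us-east-1:000000000000:my-topic": [
        {"Protocol": "sqs", "Endpoint": "my-queue"},
    ],
}

def publish(topic_arn):
    # Publishing an SMS by phone number leaves topic_arn as None.
    # subscriptions[topic_arn] would raise KeyError: None here;
    # .get(topic_arn, []) degrades to "no subscribers" instead.
    for subscriber in subscriptions.get(topic_arn, []):
        print("deliver via", subscriber["Protocol"], "to", subscriber["Endpoint"])

publish(None)                                            # no-op instead of KeyError
publish("arn:aws:sns:us-east-1:000000000000:my-topic")   # deliver via sqs to my-queue
```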
gh_patches_debug_1005
|
rasdani/github-patches
|
git_diff
|
Pycord-Development__pycord-1218
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mypy can't type check pycord when namespace_packages are enabled
### Summary
Mypy errors when using pycord with namespace_packages flag enabled
### Reproduction Steps
Run mypy against a simple pycord setup.
An example set up is as follows:
```
my-repo/
├─ my_bot/
│ ├─ bot.py
.mypy.ini
```
Run mypy via: `mypy my_bot/`
Mypy config:
```ini
[mypy]
namespace_packages = True
ignore_missing_imports = True
```
### Minimal Reproducible Code
```python
`from discord import ApplicationCommand` in bot.py
```
### Expected Results
Type checking works as expected with `namespace_packages` enabled
### Actual Results
Type checking errors with:
```sh
virtual-env-path/lib/python3.9/site-packages/discord/commands/__init__.py: error: Source file found twice under different module names: "discord.commands.__init__" and "discord.commands"
Found 1 error in 1 file (errors prevented further checking)
```
### Intents
N/A
### System Information
```yaml
- Python v3.9.5-final
- py-cord v2.0.0-beta
- py-cord pkg_resources: v2.0.0b3
- aiohttp v3.8.1
- system info: Darwin 20.6.0 Darwin Kernel Version 20.6.0: Tue Oct 12 18:33:42 PDT 2021; root:xnu-7195.141.8~1/RELEASE_X86_64
```
### Checklist
- [X] I have searched the open issues for duplicates.
- [X] I have shown the entire traceback, if possible.
- [X] I have removed my token from display, if visible.
### Additional Context
Mypy won't error if `namespace_packages` is `False`, but then it cannot infer the types properly and will result in errors such as:
```sh
app/bot.py:1: error: Module "discord" has no attribute "ApplicationCommand"; maybe "ApplicationCommandMixin"?
```
This issue is also present in nextcord; however, nextcord is available under both `discord` and `nextcord`, so in `nextcord` this issue is fixed by changing the import to `from nextcord import ApplicationCommand`. Pycord doesn't expose the package as `pycord`. Any reason for this?
Mypy can't type check pycord when namespace_packages are enabled
### Summary
Mypy errors when using pycord with namespace_packages flag enabled
### Reproduction Steps
Run mypy against a simple pycord setup.
An example set up is as follows:
```
my-repo/
├─ my_bot/
│ ├─ bot.py
.mypy.ini
```
Run mypy via: `mypy my_bot/`
Mypy config:
```ini
[mypy]
namespace_packages = True
ignore_missing_imports = True
```
### Minimal Reproducible Code
```python
`from discord import ApplicationCommand` in bot.py
```
### Expected Results
Type checking works as expected with `namespace_packages` enabled
### Actual Results
Type checking errors with:
```sh
virtual-env-path/lib/python3.9/site-packages/discord/commands/__init__.py: error: Source file found twice under different module names: "discord.commands.__init__" and "discord.commands"
Found 1 error in 1 file (errors prevented further checking)
```
### Intents
N/A
### System Information
```yaml
- Python v3.9.5-final
- py-cord v2.0.0-beta
- py-cord pkg_resources: v2.0.0b3
- aiohttp v3.8.1
- system info: Darwin 20.6.0 Darwin Kernel Version 20.6.0: Tue Oct 12 18:33:42 PDT 2021; root:xnu-7195.141.8~1/RELEASE_X86_64
```
### Checklist
- [X] I have searched the open issues for duplicates.
- [X] I have shown the entire traceback, if possible.
- [X] I have removed my token from display, if visible.
### Additional Context
Mypy won't error if `namespace_packages` is `False`, but then it cannot infer the types properly and will result in errors such as:
```sh
app/bot.py:1: error: Module "discord" has no attribute "ApplicationCommand"; maybe "ApplicationCommandMixin"?
```
This issue is also present in nextcord; however, nextcord is available under both `discord` and `nextcord`, so in `nextcord` this issue is fixed by changing the import to `from nextcord import ApplicationCommand`. Pycord doesn't expose the package as `pycord`. Any reason for this?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `discord/__init__.py`
Content:
```
1 """
2 Discord API Wrapper
3 ~~~~~~~~~~~~~~~~~~~
4
5 A basic wrapper for the Discord API.
6
7 :copyright: (c) 2015-2021 Rapptz & (c) 2021-present Pycord Development
8 :license: MIT, see LICENSE for more details.
9
10 """
11
12 __title__ = "pycord"
13 __author__ = "Pycord Development"
14 __license__ = "MIT"
15 __copyright__ = "Copyright 2015-2021 Rapptz & Copyright 2021-present Pycord Development"
16 __version__ = "2.0.0b5"
17
18 __path__ = __import__("pkgutil").extend_path(__path__, __name__)
19
20 import logging
21 from typing import Literal, NamedTuple
22
23 from . import abc, opus, sinks, ui, utils
24 from .activity import *
25 from .appinfo import *
26 from .asset import *
27 from .audit_logs import *
28 from .bot import *
29 from .channel import *
30 from .client import *
31 from .cog import Cog
32 from .colour import *
33 from .commands.__init__ import *
34 from .components import *
35 from .embeds import *
36 from .emoji import *
37 from .enums import *
38 from .errors import *
39 from .file import *
40 from .flags import *
41 from .guild import *
42 from .http import *
43 from .integrations import *
44 from .interactions import *
45 from .invite import *
46 from .member import *
47 from .mentions import *
48 from .message import *
49 from .object import *
50 from .partial_emoji import *
51 from .permissions import *
52 from .player import *
53 from .raw_models import *
54 from .reaction import *
55 from .role import *
56 from .scheduled_events import *
57 from .shard import *
58 from .stage_instance import *
59 from .sticker import *
60 from .team import *
61 from .template import *
62 from .threads import *
63 from .user import *
64 from .voice_client import *
65 from .webhook import *
66 from .welcome_screen import *
67 from .widget import *
68
69
70 class VersionInfo(NamedTuple):
71 major: int
72 minor: int
73 micro: int
74 releaselevel: Literal["alpha", "beta", "candidate", "final"]
75 serial: int
76
77
78 version_info: VersionInfo = VersionInfo(major=2, minor=0, micro=0, releaselevel="beta", serial=5)
79
80 logging.getLogger(__name__).addHandler(logging.NullHandler())
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/discord/__init__.py b/discord/__init__.py
--- a/discord/__init__.py
+++ b/discord/__init__.py
@@ -30,7 +30,7 @@
from .client import *
from .cog import Cog
from .colour import *
-from .commands.__init__ import *
+from .commands import *
from .components import *
from .embeds import *
from .emoji import *
|
{"golden_diff": "diff --git a/discord/__init__.py b/discord/__init__.py\n--- a/discord/__init__.py\n+++ b/discord/__init__.py\n@@ -30,7 +30,7 @@\n from .client import *\n from .cog import Cog\n from .colour import *\n-from .commands.__init__ import *\n+from .commands import *\n from .components import *\n from .embeds import *\n from .emoji import *\n", "issue": "Mypy can't type check pycord when namespace_packages are enabled\n### Summary\r\n\r\nMypy errors when using pycord with namespace_packages flag enabled\r\n\r\n### Reproduction Steps\r\n\r\nRun mypy against a simple pycord setup.\r\n\r\nAn example set up is as follows:\r\n\r\n```\r\nmy-repo/\r\n\u251c\u2500 my_bot/\r\n\u2502 \u251c\u2500 bot.py\r\n.mypy.ini\r\n```\r\n\r\nRun mypy via: `mypy my_bot/`\r\n\r\nMypy config:\r\n```ini\r\n[mypy]\r\nnamespace_packages = True\r\nignore_missing_imports = True\r\n```\r\n\r\n\r\n### Minimal Reproducible Code\r\n\r\n```python\r\n`from discord import ApplicationCommand` in bot.py\r\n```\r\n\r\n\r\n### Expected Results\r\n\r\nType checking works as expected with `namespace_packages` enabled\r\n\r\n### Actual Results\r\n\r\nType checking errors with:\r\n```sh\r\nvirtual-env-path/lib/python3.9/site-packages/discord/commands/__init__.py: error: Source file found twice under different module names: \"discord.commands.__init__\" and \"discord.commands\"\r\nFound 1 error in 1 file (errors prevented further checking)\r\n```\r\n\r\n### Intents\r\n\r\nN/A\r\n\r\n### System Information\r\n\r\n```yaml\r\n- Python v3.9.5-final\r\n- py-cord v2.0.0-beta\r\n - py-cord pkg_resources: v2.0.0b3\r\n- aiohttp v3.8.1\r\n- system info: Darwin 20.6.0 Darwin Kernel Version 20.6.0: Tue Oct 12 18:33:42 PDT 2021; root:xnu-7195.141.8~1/RELEASE_X86_64\r\n```\r\n\r\n### Checklist\r\n\r\n- [X] I have searched the open issues for duplicates.\r\n- [X] I have shown the entire traceback, if possible.\r\n- [X] I have removed my token from display, if visible.\r\n\r\n### Additional Context\r\n\r\nMypy won't error is `namespace_packages` is `False` but then it cannot infer the types properly and will result in errors such as:\r\n```sh\r\napp/bot.py:1: error: Module \"discord\" has no attribute \"ApplicationCommand\"; maybe \"ApplicationCommandMixin\"?\r\n```\r\n\r\nThis issue is also persistent in nextcord however, nextcord is available under `discord` and `nextcord` so in `nextcord` this issue is fixed by changing the import to `from nextcord import ApplicationCommand`. Pycord doesn't expose the package as `pycord`. 
Any reason for this?.\nMypy can't type check pycord when namespace_packages are enabled\n### Summary\r\n\r\nMypy errors when using pycord with namespace_packages flag enabled\r\n\r\n### Reproduction Steps\r\n\r\nRun mypy against a simple pycord setup.\r\n\r\nAn example set up is as follows:\r\n\r\n```\r\nmy-repo/\r\n\u251c\u2500 my_bot/\r\n\u2502 \u251c\u2500 bot.py\r\n.mypy.ini\r\n```\r\n\r\nRun mypy via: `mypy my_bot/`\r\n\r\nMypy config:\r\n```ini\r\n[mypy]\r\nnamespace_packages = True\r\nignore_missing_imports = True\r\n```\r\n\r\n\r\n### Minimal Reproducible Code\r\n\r\n```python\r\n`from discord import ApplicationCommand` in bot.py\r\n```\r\n\r\n\r\n### Expected Results\r\n\r\nType checking works as expected with `namespace_packages` enabled\r\n\r\n### Actual Results\r\n\r\nType checking errors with:\r\n```sh\r\nvirtual-env-path/lib/python3.9/site-packages/discord/commands/__init__.py: error: Source file found twice under different module names: \"discord.commands.__init__\" and \"discord.commands\"\r\nFound 1 error in 1 file (errors prevented further checking)\r\n```\r\n\r\n### Intents\r\n\r\nN/A\r\n\r\n### System Information\r\n\r\n```yaml\r\n- Python v3.9.5-final\r\n- py-cord v2.0.0-beta\r\n - py-cord pkg_resources: v2.0.0b3\r\n- aiohttp v3.8.1\r\n- system info: Darwin 20.6.0 Darwin Kernel Version 20.6.0: Tue Oct 12 18:33:42 PDT 2021; root:xnu-7195.141.8~1/RELEASE_X86_64\r\n```\r\n\r\n### Checklist\r\n\r\n- [X] I have searched the open issues for duplicates.\r\n- [X] I have shown the entire traceback, if possible.\r\n- [X] I have removed my token from display, if visible.\r\n\r\n### Additional Context\r\n\r\nMypy won't error is `namespace_packages` is `False` but then it cannot infer the types properly and will result in errors such as:\r\n```sh\r\napp/bot.py:1: error: Module \"discord\" has no attribute \"ApplicationCommand\"; maybe \"ApplicationCommandMixin\"?\r\n```\r\n\r\nThis issue is also persistent in nextcord however, nextcord is available under `discord` and `nextcord` so in `nextcord` this issue is fixed by changing the import to `from nextcord import ApplicationCommand`. Pycord doesn't expose the package as `pycord`. Any reason for this?.\n", "before_files": [{"content": "\"\"\"\nDiscord API Wrapper\n~~~~~~~~~~~~~~~~~~~\n\nA basic wrapper for the Discord API.\n\n:copyright: (c) 2015-2021 Rapptz & (c) 2021-present Pycord Development\n:license: MIT, see LICENSE for more details.\n\n\"\"\"\n\n__title__ = \"pycord\"\n__author__ = \"Pycord Development\"\n__license__ = \"MIT\"\n__copyright__ = \"Copyright 2015-2021 Rapptz & Copyright 2021-present Pycord Development\"\n__version__ = \"2.0.0b5\"\n\n__path__ = __import__(\"pkgutil\").extend_path(__path__, __name__)\n\nimport logging\nfrom typing import Literal, NamedTuple\n\nfrom . 
import abc, opus, sinks, ui, utils\nfrom .activity import *\nfrom .appinfo import *\nfrom .asset import *\nfrom .audit_logs import *\nfrom .bot import *\nfrom .channel import *\nfrom .client import *\nfrom .cog import Cog\nfrom .colour import *\nfrom .commands.__init__ import *\nfrom .components import *\nfrom .embeds import *\nfrom .emoji import *\nfrom .enums import *\nfrom .errors import *\nfrom .file import *\nfrom .flags import *\nfrom .guild import *\nfrom .http import *\nfrom .integrations import *\nfrom .interactions import *\nfrom .invite import *\nfrom .member import *\nfrom .mentions import *\nfrom .message import *\nfrom .object import *\nfrom .partial_emoji import *\nfrom .permissions import *\nfrom .player import *\nfrom .raw_models import *\nfrom .reaction import *\nfrom .role import *\nfrom .scheduled_events import *\nfrom .shard import *\nfrom .stage_instance import *\nfrom .sticker import *\nfrom .team import *\nfrom .template import *\nfrom .threads import *\nfrom .user import *\nfrom .voice_client import *\nfrom .webhook import *\nfrom .welcome_screen import *\nfrom .widget import *\n\n\nclass VersionInfo(NamedTuple):\n major: int\n minor: int\n micro: int\n releaselevel: Literal[\"alpha\", \"beta\", \"candidate\", \"final\"]\n serial: int\n\n\nversion_info: VersionInfo = VersionInfo(major=2, minor=0, micro=0, releaselevel=\"beta\", serial=5)\n\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n", "path": "discord/__init__.py"}], "after_files": [{"content": "\"\"\"\nDiscord API Wrapper\n~~~~~~~~~~~~~~~~~~~\n\nA basic wrapper for the Discord API.\n\n:copyright: (c) 2015-2021 Rapptz & (c) 2021-present Pycord Development\n:license: MIT, see LICENSE for more details.\n\n\"\"\"\n\n__title__ = \"pycord\"\n__author__ = \"Pycord Development\"\n__license__ = \"MIT\"\n__copyright__ = \"Copyright 2015-2021 Rapptz & Copyright 2021-present Pycord Development\"\n__version__ = \"2.0.0b5\"\n\n__path__ = __import__(\"pkgutil\").extend_path(__path__, __name__)\n\nimport logging\nfrom typing import Literal, NamedTuple\n\nfrom . import abc, opus, sinks, ui, utils\nfrom .activity import *\nfrom .appinfo import *\nfrom .asset import *\nfrom .audit_logs import *\nfrom .bot import *\nfrom .channel import *\nfrom .client import *\nfrom .cog import Cog\nfrom .colour import *\nfrom .commands import *\nfrom .components import *\nfrom .embeds import *\nfrom .emoji import *\nfrom .enums import *\nfrom .errors import *\nfrom .file import *\nfrom .flags import *\nfrom .guild import *\nfrom .http import *\nfrom .integrations import *\nfrom .interactions import *\nfrom .invite import *\nfrom .member import *\nfrom .mentions import *\nfrom .message import *\nfrom .object import *\nfrom .partial_emoji import *\nfrom .permissions import *\nfrom .player import *\nfrom .raw_models import *\nfrom .reaction import *\nfrom .role import *\nfrom .scheduled_events import *\nfrom .shard import *\nfrom .stage_instance import *\nfrom .sticker import *\nfrom .team import *\nfrom .template import *\nfrom .threads import *\nfrom .user import *\nfrom .voice_client import *\nfrom .webhook import *\nfrom .welcome_screen import *\nfrom .widget import *\n\n\nclass VersionInfo(NamedTuple):\n major: int\n minor: int\n micro: int\n releaselevel: Literal[\"alpha\", \"beta\", \"candidate\", \"final\"]\n serial: int\n\n\nversion_info: VersionInfo = VersionInfo(major=2, minor=0, micro=0, releaselevel=\"beta\", serial=5)\n\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n", "path": "discord/__init__.py"}]}
| 2,000 | 96 |
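A small, hypothetical demonstration of the duplication behind the record above (a throwaway package built in a temp directory, not pycord's own layout): importing a package's `__init__` by name loads the same file under a second module name, which is the one-file-two-module-names situation mypy reports when namespace_packages is enabled.

```python
import os
import sys
import tempfile

# Hypothetical layout: pkg/__init__.py (empty) and pkg/sub/__init__.py (defines VALUE).
root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "pkg", "sub"))
open(os.path.join(root, "pkg", "__init__.py"), "w").close()
with open(os.path.join(root, "pkg", "sub", "__init__.py"), "w") as f:
    f.write("VALUE = 42\n")

sys.path.insert(0, root)

import pkg.sub                  # registered as module "pkg.sub"
import pkg.sub.__init__ as dup  # same file again, registered as "pkg.sub.__init__"

print(pkg.sub.VALUE, dup.VALUE)           # 42 42
print(pkg.sub is dup)                     # False: two module objects for one source file
print("pkg.sub.__init__" in sys.modules)  # True: the duplicate name mypy complains about
# The golden diff sidesteps this by importing "from .commands import *"
# rather than "from .commands.__init__ import *".
```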
gh_patches_debug_14814
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-599
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update urllib3: HTTP Header Injection vuln
**Describe the bug**
urllib3 needs to be updated to at least 1.25.9 to fix a high severity HTTP Header Injection vulnerability. Snyk info page [here](https://snyk.io/vuln/SNYK-PYTHON-URLLIB3-1014645).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import logging
3 import os
4 from importlib import util
5 from os import path
6
7 import setuptools
8 from setuptools import setup
9
10 # read the contents of your README file
11 this_directory = path.abspath(path.dirname(__file__))
12 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14
15 logger = logging.getLogger(__name__)
16 spec = util.spec_from_file_location(
17 "checkov.version", os.path.join("checkov", "version.py")
18 )
19 # noinspection PyUnresolvedReferences
20 mod = util.module_from_spec(spec)
21 spec.loader.exec_module(mod) # type: ignore
22 version = mod.version # type: ignore
23
24 setup(
25 extras_require={
26 "dev": [
27 "alabaster==0.7.12",
28 "attrs==19.3.0",
29 "babel==2.7.0",
30 "certifi==2019.11.28",
31 "chardet==3.0.4",
32 "coverage==4.5.4",
33 "coverage-badge==1.0.1",
34 "docopt==0.6.2",
35 "docutils==0.15.2",
36 "idna==2.8",
37 "imagesize==1.1.0",
38 "importlib-metadata==1.1.0; python_version < '3.8'",
39 "jinja2==2.10.3",
40 "lark-parser==0.7.8",
41 "markupsafe==1.1.1",
42 "more-itertools==8.0.0",
43 "packaging==19.2",
44 "pluggy==0.13.1",
45 "py==1.8.0",
46 "pygments==2.5.2",
47 "pyparsing==2.4.5",
48 "pytest==5.3.1",
49 "bc-python-hcl2>=0.3.10",
50 "pytz==2019.3",
51 "pyyaml==5.3.1",
52 "requests==2.22.0",
53 "six==1.15.0",
54 "snowballstemmer==2.0.0",
55 "sphinx==2.2.1",
56 "sphinxcontrib-applehelp==1.0.1",
57 "sphinxcontrib-devhelp==1.0.1",
58 "sphinxcontrib-htmlhelp==1.0.2",
59 "sphinxcontrib-jsmath==1.0.1",
60 "sphinxcontrib-qthelp==1.0.2",
61 "sphinxcontrib-serializinghtml==1.1.3",
62 "urllib3==1.25.7",
63 "wcwidth==0.1.7",
64 "zipp==0.6.0",
65 "GitPython==3.1.7",
66 "gitdb==4.0.5"
67 ]
68 },
69 install_requires=[
70 "boto3==1.12.43",
71 "chardet==3.0.4",
72 "colorama==0.4.3",
73 "docopt==0.6.2",
74 "idna==2.8",
75 "jmespath==0.10.0",
76 "junit-xml==1.8",
77 "lark-parser==0.7.8",
78 "bc-python-hcl2>=0.3.11",
79 "pyyaml==5.3.1",
80 "requests==2.22.0",
81 "six==1.15.0",
82 "tabulate==0.8.6",
83 "termcolor==1.1.0",
84 "urllib3==1.25.7",
85 "dpath==1.5.0",
86 "GitPython==3.1.7",
87 "gitdb==4.0.5"
88 ],
89 license="Apache License 2.0",
90 name="checkov",
91 version=version,
92 description="Infrastructure as code static analysis",
93 author="bridgecrew",
94 author_email="[email protected]",
95 url="https://github.com/bridgecrewio/checkov",
96 packages=setuptools.find_packages(exclude=["tests*","integration_tests*"]),
97 scripts=["bin/checkov","bin/checkov.cmd"],
98 long_description=long_description,
99 long_description_content_type="text/markdown",
100 classifiers=[
101 'Environment :: Console',
102 'Intended Audience :: Developers',
103 'Intended Audience :: System Administrators',
104 'Programming Language :: Python :: 3.7',
105 'Topic :: Security',
106 'Topic :: Software Development :: Build Tools'
107 ]
108 )
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -59,7 +59,7 @@
"sphinxcontrib-jsmath==1.0.1",
"sphinxcontrib-qthelp==1.0.2",
"sphinxcontrib-serializinghtml==1.1.3",
- "urllib3==1.25.7",
+ "urllib3==1.25.10",
"wcwidth==0.1.7",
"zipp==0.6.0",
"GitPython==3.1.7",
@@ -81,7 +81,7 @@
"six==1.15.0",
"tabulate==0.8.6",
"termcolor==1.1.0",
- "urllib3==1.25.7",
+ "urllib3==1.25.10",
"dpath==1.5.0",
"GitPython==3.1.7",
"gitdb==4.0.5"
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -59,7 +59,7 @@\n \"sphinxcontrib-jsmath==1.0.1\",\n \"sphinxcontrib-qthelp==1.0.2\",\n \"sphinxcontrib-serializinghtml==1.1.3\",\n- \"urllib3==1.25.7\",\n+ \"urllib3==1.25.10\",\n \"wcwidth==0.1.7\",\n \"zipp==0.6.0\",\n \"GitPython==3.1.7\",\n@@ -81,7 +81,7 @@\n \"six==1.15.0\",\n \"tabulate==0.8.6\",\n \"termcolor==1.1.0\",\n- \"urllib3==1.25.7\",\n+ \"urllib3==1.25.10\",\n \"dpath==1.5.0\",\n \"GitPython==3.1.7\",\n \"gitdb==4.0.5\"\n", "issue": "Update urllib3: HTTP Header Injection vuln\n**Describe the bug**\r\nurllib3 needs to be updated to at least 1.25.9 to fix a high severity HTTP Header Injection vulnerability. Snyk info page [here](https://snyk.io/vuln/SNYK-PYTHON-URLLIB3-1014645).\n", "before_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"alabaster==0.7.12\",\n \"attrs==19.3.0\",\n \"babel==2.7.0\",\n \"certifi==2019.11.28\",\n \"chardet==3.0.4\",\n \"coverage==4.5.4\",\n \"coverage-badge==1.0.1\",\n \"docopt==0.6.2\",\n \"docutils==0.15.2\",\n \"idna==2.8\",\n \"imagesize==1.1.0\",\n \"importlib-metadata==1.1.0; python_version < '3.8'\",\n \"jinja2==2.10.3\",\n \"lark-parser==0.7.8\",\n \"markupsafe==1.1.1\",\n \"more-itertools==8.0.0\",\n \"packaging==19.2\",\n \"pluggy==0.13.1\",\n \"py==1.8.0\",\n \"pygments==2.5.2\",\n \"pyparsing==2.4.5\",\n \"pytest==5.3.1\",\n \"bc-python-hcl2>=0.3.10\",\n \"pytz==2019.3\",\n \"pyyaml==5.3.1\",\n \"requests==2.22.0\",\n \"six==1.15.0\",\n \"snowballstemmer==2.0.0\",\n \"sphinx==2.2.1\",\n \"sphinxcontrib-applehelp==1.0.1\",\n \"sphinxcontrib-devhelp==1.0.1\",\n \"sphinxcontrib-htmlhelp==1.0.2\",\n \"sphinxcontrib-jsmath==1.0.1\",\n \"sphinxcontrib-qthelp==1.0.2\",\n \"sphinxcontrib-serializinghtml==1.1.3\",\n \"urllib3==1.25.7\",\n \"wcwidth==0.1.7\",\n \"zipp==0.6.0\",\n \"GitPython==3.1.7\",\n \"gitdb==4.0.5\"\n ]\n },\n install_requires=[\n \"boto3==1.12.43\",\n \"chardet==3.0.4\",\n \"colorama==0.4.3\",\n \"docopt==0.6.2\",\n \"idna==2.8\",\n \"jmespath==0.10.0\",\n \"junit-xml==1.8\",\n \"lark-parser==0.7.8\",\n \"bc-python-hcl2>=0.3.11\",\n \"pyyaml==5.3.1\",\n \"requests==2.22.0\",\n \"six==1.15.0\",\n \"tabulate==0.8.6\",\n \"termcolor==1.1.0\",\n \"urllib3==1.25.7\",\n \"dpath==1.5.0\",\n \"GitPython==3.1.7\",\n \"gitdb==4.0.5\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\",\"integration_tests*\"]),\n scripts=[\"bin/checkov\",\"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 
'Intended Audience :: System Administrators',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Security',\n 'Topic :: Software Development :: Build Tools'\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"alabaster==0.7.12\",\n \"attrs==19.3.0\",\n \"babel==2.7.0\",\n \"certifi==2019.11.28\",\n \"chardet==3.0.4\",\n \"coverage==4.5.4\",\n \"coverage-badge==1.0.1\",\n \"docopt==0.6.2\",\n \"docutils==0.15.2\",\n \"idna==2.8\",\n \"imagesize==1.1.0\",\n \"importlib-metadata==1.1.0; python_version < '3.8'\",\n \"jinja2==2.10.3\",\n \"lark-parser==0.7.8\",\n \"markupsafe==1.1.1\",\n \"more-itertools==8.0.0\",\n \"packaging==19.2\",\n \"pluggy==0.13.1\",\n \"py==1.8.0\",\n \"pygments==2.5.2\",\n \"pyparsing==2.4.5\",\n \"pytest==5.3.1\",\n \"bc-python-hcl2>=0.3.10\",\n \"pytz==2019.3\",\n \"pyyaml==5.3.1\",\n \"requests==2.22.0\",\n \"six==1.15.0\",\n \"snowballstemmer==2.0.0\",\n \"sphinx==2.2.1\",\n \"sphinxcontrib-applehelp==1.0.1\",\n \"sphinxcontrib-devhelp==1.0.1\",\n \"sphinxcontrib-htmlhelp==1.0.2\",\n \"sphinxcontrib-jsmath==1.0.1\",\n \"sphinxcontrib-qthelp==1.0.2\",\n \"sphinxcontrib-serializinghtml==1.1.3\",\n \"urllib3==1.25.10\",\n \"wcwidth==0.1.7\",\n \"zipp==0.6.0\",\n \"GitPython==3.1.7\",\n \"gitdb==4.0.5\"\n ]\n },\n install_requires=[\n \"boto3==1.12.43\",\n \"chardet==3.0.4\",\n \"colorama==0.4.3\",\n \"docopt==0.6.2\",\n \"idna==2.8\",\n \"jmespath==0.10.0\",\n \"junit-xml==1.8\",\n \"lark-parser==0.7.8\",\n \"bc-python-hcl2>=0.3.11\",\n \"pyyaml==5.3.1\",\n \"requests==2.22.0\",\n \"six==1.15.0\",\n \"tabulate==0.8.6\",\n \"termcolor==1.1.0\",\n \"urllib3==1.25.10\",\n \"dpath==1.5.0\",\n \"GitPython==3.1.7\",\n \"gitdb==4.0.5\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\",\"integration_tests*\"]),\n scripts=[\"bin/checkov\",\"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Security',\n 'Topic :: Software Development :: Build Tools'\n ]\n)\n", "path": "setup.py"}]}
| 1,588 | 246 |
gh_patches_debug_28270
|
rasdani/github-patches
|
git_diff
|
celery__celery-4744
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MongoDB backend does not support mongodb+srv:// URL's
## Checklist
https://github.com/celery/celery/blob/master/celery/backends/mongodb.py#L143-L146
## Steps to reproduce
Set the MongoDB URL to one like:
```mongodb+srv://mongo.private.corp.example.com/celery?ssl=false```
## Expected behavior
This works.
## Actual behavior
This fails because the URL parsing does not match on `mongodb+srv://`, instead trying to match `mongodb://` only.
--- END ISSUE ---
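The eventual fix normalizes the scheme before the URL reaches `pymongo.uri_parser`. Below is a minimal standalone sketch of that idea, using the standard library's `urllib.parse` rather than Celery's internal helpers; the function name is hypothetical and not Celery's actual API.
```python
# Sketch of scheme normalization so mongodb:// and mongodb+srv:// URLs both pass through.
# `ensure_mongodb_uri` is a hypothetical helper, not Celery's actual API.
from urllib.parse import urlparse


def ensure_mongodb_uri(url):
    """Restore a 'mongodb' scheme prefix if it was stripped, and default a bare URL to localhost."""
    if not urlparse(url).scheme.startswith('mongodb'):
        # e.g. a URL handed over as 'srv://host/db' becomes 'mongodb+srv://host/db'
        url = 'mongodb+{0}'.format(url)
    if url == 'mongodb://':
        url += 'localhost'
    return url


print(ensure_mongodb_uri('mongodb+srv://mongo.private.corp.example.com/celery?ssl=false'))
print(ensure_mongodb_uri('srv://mongo.private.corp.example.com/celery?ssl=false'))
print(ensure_mongodb_uri('mongodb://'))
```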
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `celery/backends/mongodb.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """MongoDB result store backend."""
3 from __future__ import absolute_import, unicode_literals
4
5 from datetime import datetime, timedelta
6
7 from kombu.exceptions import EncodeError
8 from kombu.utils.objects import cached_property
9 from kombu.utils.url import maybe_sanitize_url
10
11 from celery import states
12 from celery.exceptions import ImproperlyConfigured
13 from celery.five import items, string_t
14
15 from .base import BaseBackend
16
17 try:
18 import pymongo
19 except ImportError: # pragma: no cover
20 pymongo = None # noqa
21
22 if pymongo:
23 try:
24 from bson.binary import Binary
25 except ImportError: # pragma: no cover
26 from pymongo.binary import Binary # noqa
27 from pymongo.errors import InvalidDocument # noqa
28 else: # pragma: no cover
29 Binary = None # noqa
30
31 class InvalidDocument(Exception): # noqa
32 pass
33
34 __all__ = ('MongoBackend',)
35
36 BINARY_CODECS = frozenset(['pickle', 'msgpack'])
37
38
39 class MongoBackend(BaseBackend):
40 """MongoDB result backend.
41
42 Raises:
43 celery.exceptions.ImproperlyConfigured:
44 if module :pypi:`pymongo` is not available.
45 """
46
47 mongo_host = None
48 host = 'localhost'
49 port = 27017
50 user = None
51 password = None
52 database_name = 'celery'
53 taskmeta_collection = 'celery_taskmeta'
54 groupmeta_collection = 'celery_groupmeta'
55 max_pool_size = 10
56 options = None
57
58 supports_autoexpire = False
59
60 _connection = None
61
62 def __init__(self, app=None, **kwargs):
63 self.options = {}
64
65 super(MongoBackend, self).__init__(app, **kwargs)
66
67 if not pymongo:
68 raise ImproperlyConfigured(
69 'You need to install the pymongo library to use the '
70 'MongoDB backend.')
71
72 # Set option defaults
73 for key, value in items(self._prepare_client_options()):
74 self.options.setdefault(key, value)
75
76 # update conf with mongo uri data, only if uri was given
77 if self.url:
78 if self.url == 'mongodb://':
79 self.url += 'localhost'
80
81 uri_data = pymongo.uri_parser.parse_uri(self.url)
82 # build the hosts list to create a mongo connection
83 hostslist = [
84 '{0}:{1}'.format(x[0], x[1]) for x in uri_data['nodelist']
85 ]
86 self.user = uri_data['username']
87 self.password = uri_data['password']
88 self.mongo_host = hostslist
89 if uri_data['database']:
90 # if no database is provided in the uri, use default
91 self.database_name = uri_data['database']
92
93 self.options.update(uri_data['options'])
94
95 # update conf with specific settings
96 config = self.app.conf.get('mongodb_backend_settings')
97 if config is not None:
98 if not isinstance(config, dict):
99 raise ImproperlyConfigured(
100 'MongoDB backend settings should be grouped in a dict')
101 config = dict(config) # don't modify original
102
103 if 'host' in config or 'port' in config:
104 # these should take over uri conf
105 self.mongo_host = None
106
107 self.host = config.pop('host', self.host)
108 self.port = config.pop('port', self.port)
109 self.mongo_host = config.pop('mongo_host', self.mongo_host)
110 self.user = config.pop('user', self.user)
111 self.password = config.pop('password', self.password)
112 self.database_name = config.pop('database', self.database_name)
113 self.taskmeta_collection = config.pop(
114 'taskmeta_collection', self.taskmeta_collection,
115 )
116 self.groupmeta_collection = config.pop(
117 'groupmeta_collection', self.groupmeta_collection,
118 )
119
120 self.options.update(config.pop('options', {}))
121 self.options.update(config)
122
123 def _prepare_client_options(self):
124 if pymongo.version_tuple >= (3,):
125 return {'maxPoolSize': self.max_pool_size}
126 else: # pragma: no cover
127 return {'max_pool_size': self.max_pool_size,
128 'auto_start_request': False}
129
130 def _get_connection(self):
131 """Connect to the MongoDB server."""
132 if self._connection is None:
133 from pymongo import MongoClient
134
135 host = self.mongo_host
136 if not host:
137 # The first pymongo.Connection() argument (host) can be
138 # a list of ['host:port'] elements or a mongodb connection
139 # URI. If this is the case, don't use self.port
140 # but let pymongo get the port(s) from the URI instead.
141 # This enables the use of replica sets and sharding.
142 # See pymongo.Connection() for more info.
143 host = self.host
144 if isinstance(host, string_t) \
145 and not host.startswith('mongodb://'):
146 host = 'mongodb://{0}:{1}'.format(host, self.port)
147 # don't change self.options
148 conf = dict(self.options)
149 conf['host'] = host
150
151 self._connection = MongoClient(**conf)
152
153 return self._connection
154
155 def encode(self, data):
156 if self.serializer == 'bson':
157 # mongodb handles serialization
158 return data
159 payload = super(MongoBackend, self).encode(data)
160
161 # serializer which are in a unsupported format (pickle/binary)
162 if self.serializer in BINARY_CODECS:
163 payload = Binary(payload)
164 return payload
165
166 def decode(self, data):
167 if self.serializer == 'bson':
168 return data
169 return super(MongoBackend, self).decode(data)
170
171 def _store_result(self, task_id, result, state,
172 traceback=None, request=None, **kwargs):
173 """Store return value and state of an executed task."""
174 meta = {
175 '_id': task_id,
176 'status': state,
177 'result': self.encode(result),
178 'date_done': datetime.utcnow(),
179 'traceback': self.encode(traceback),
180 'children': self.encode(
181 self.current_task_children(request),
182 ),
183 }
184 if request and getattr(request, 'parent_id', None):
185 meta['parent_id'] = request.parent_id
186
187 try:
188 self.collection.save(meta)
189 except InvalidDocument as exc:
190 raise EncodeError(exc)
191
192 return result
193
194 def _get_task_meta_for(self, task_id):
195 """Get task meta-data for a task by id."""
196 obj = self.collection.find_one({'_id': task_id})
197 if obj:
198 return self.meta_from_decoded({
199 'task_id': obj['_id'],
200 'status': obj['status'],
201 'result': self.decode(obj['result']),
202 'date_done': obj['date_done'],
203 'traceback': self.decode(obj['traceback']),
204 'children': self.decode(obj['children']),
205 })
206 return {'status': states.PENDING, 'result': None}
207
208 def _save_group(self, group_id, result):
209 """Save the group result."""
210 self.group_collection.save({
211 '_id': group_id,
212 'result': self.encode([i.id for i in result]),
213 'date_done': datetime.utcnow(),
214 })
215 return result
216
217 def _restore_group(self, group_id):
218 """Get the result for a group by id."""
219 obj = self.group_collection.find_one({'_id': group_id})
220 if obj:
221 return {
222 'task_id': obj['_id'],
223 'date_done': obj['date_done'],
224 'result': [
225 self.app.AsyncResult(task)
226 for task in self.decode(obj['result'])
227 ],
228 }
229
230 def _delete_group(self, group_id):
231 """Delete a group by id."""
232 self.group_collection.remove({'_id': group_id})
233
234 def _forget(self, task_id):
235 """Remove result from MongoDB.
236
237 Raises:
238 pymongo.exceptions.OperationsError:
239 if the task_id could not be removed.
240 """
241 # By using safe=True, this will wait until it receives a response from
242 # the server. Likewise, it will raise an OperationsError if the
243 # response was unable to be completed.
244 self.collection.remove({'_id': task_id})
245
246 def cleanup(self):
247 """Delete expired meta-data."""
248 self.collection.remove(
249 {'date_done': {'$lt': self.app.now() - self.expires_delta}},
250 )
251 self.group_collection.remove(
252 {'date_done': {'$lt': self.app.now() - self.expires_delta}},
253 )
254
255 def __reduce__(self, args=(), kwargs={}):
256 return super(MongoBackend, self).__reduce__(
257 args, dict(kwargs, expires=self.expires, url=self.url))
258
259 def _get_database(self):
260 conn = self._get_connection()
261 db = conn[self.database_name]
262 if self.user and self.password:
263 if not db.authenticate(self.user, self.password):
264 raise ImproperlyConfigured(
265 'Invalid MongoDB username or password.')
266 return db
267
268 @cached_property
269 def database(self):
270 """Get database from MongoDB connection.
271
272 performs authentication if necessary.
273 """
274 return self._get_database()
275
276 @cached_property
277 def collection(self):
278 """Get the meta-data task collection."""
279 collection = self.database[self.taskmeta_collection]
280
281 # Ensure an index on date_done is there, if not process the index
282 # in the background. Once completed cleanup will be much faster
283 collection.ensure_index('date_done', background='true')
284 return collection
285
286 @cached_property
287 def group_collection(self):
288 """Get the meta-data task collection."""
289 collection = self.database[self.groupmeta_collection]
290
291 # Ensure an index on date_done is there, if not process the index
292 # in the background. Once completed cleanup will be much faster
293 collection.ensure_index('date_done', background='true')
294 return collection
295
296 @cached_property
297 def expires_delta(self):
298 return timedelta(seconds=self.expires)
299
300 def as_uri(self, include_password=False):
301 """Return the backend as an URI.
302
303 Arguments:
304 include_password (bool): Password censored if disabled.
305 """
306 if not self.url:
307 return 'mongodb://'
308 if include_password:
309 return self.url
310
311 if ',' not in self.url:
312 return maybe_sanitize_url(self.url)
313
314 uri1, remainder = self.url.split(',', 1)
315 return ','.join([maybe_sanitize_url(uri1), remainder])
316
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/celery/backends/mongodb.py b/celery/backends/mongodb.py
--- a/celery/backends/mongodb.py
+++ b/celery/backends/mongodb.py
@@ -6,7 +6,7 @@
from kombu.exceptions import EncodeError
from kombu.utils.objects import cached_property
-from kombu.utils.url import maybe_sanitize_url
+from kombu.utils.url import maybe_sanitize_url, urlparse
from celery import states
from celery.exceptions import ImproperlyConfigured
@@ -75,8 +75,7 @@
# update conf with mongo uri data, only if uri was given
if self.url:
- if self.url == 'mongodb://':
- self.url += 'localhost'
+ self.url = self._ensure_mongodb_uri_compliance(self.url)
uri_data = pymongo.uri_parser.parse_uri(self.url)
# build the hosts list to create a mongo connection
@@ -120,6 +119,17 @@
self.options.update(config.pop('options', {}))
self.options.update(config)
+ @staticmethod
+ def _ensure_mongodb_uri_compliance(url):
+ parsed_url = urlparse(url)
+ if not parsed_url.scheme.startswith('mongodb'):
+ url = 'mongodb+{}'.format(url)
+
+ if url == 'mongodb://':
+ url += 'localhost'
+
+ return url
+
def _prepare_client_options(self):
if pymongo.version_tuple >= (3,):
return {'maxPoolSize': self.max_pool_size}
|
{"golden_diff": "diff --git a/celery/backends/mongodb.py b/celery/backends/mongodb.py\n--- a/celery/backends/mongodb.py\n+++ b/celery/backends/mongodb.py\n@@ -6,7 +6,7 @@\n \n from kombu.exceptions import EncodeError\n from kombu.utils.objects import cached_property\n-from kombu.utils.url import maybe_sanitize_url\n+from kombu.utils.url import maybe_sanitize_url, urlparse\n \n from celery import states\n from celery.exceptions import ImproperlyConfigured\n@@ -75,8 +75,7 @@\n \n # update conf with mongo uri data, only if uri was given\n if self.url:\n- if self.url == 'mongodb://':\n- self.url += 'localhost'\n+ self.url = self._ensure_mongodb_uri_compliance(self.url)\n \n uri_data = pymongo.uri_parser.parse_uri(self.url)\n # build the hosts list to create a mongo connection\n@@ -120,6 +119,17 @@\n self.options.update(config.pop('options', {}))\n self.options.update(config)\n \n+ @staticmethod\n+ def _ensure_mongodb_uri_compliance(url):\n+ parsed_url = urlparse(url)\n+ if not parsed_url.scheme.startswith('mongodb'):\n+ url = 'mongodb+{}'.format(url)\n+\n+ if url == 'mongodb://':\n+ url += 'localhost'\n+\n+ return url\n+\n def _prepare_client_options(self):\n if pymongo.version_tuple >= (3,):\n return {'maxPoolSize': self.max_pool_size}\n", "issue": "MongoDB backend does not support mongodb+srv:// URL's\n## Checklist\r\n\r\nhttps://github.com/celery/celery/blob/master/celery/backends/mongodb.py#L143-L146\r\n\r\n## Steps to reproduce\r\n\r\nSet the MongoDB URL to one like:\r\n\r\n```mongodb+srv://mongo.private.corp.example.com/celery?ssl=false```\r\n\r\n## Expected behavior\r\n\r\nThis works.\r\n\r\n## Actual behavior\r\n\r\nThis fails because the URL parsing does not match on `mongodb+srv://` instead trying to match `mongodb://` only.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"MongoDB result store backend.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nfrom datetime import datetime, timedelta\n\nfrom kombu.exceptions import EncodeError\nfrom kombu.utils.objects import cached_property\nfrom kombu.utils.url import maybe_sanitize_url\n\nfrom celery import states\nfrom celery.exceptions import ImproperlyConfigured\nfrom celery.five import items, string_t\n\nfrom .base import BaseBackend\n\ntry:\n import pymongo\nexcept ImportError: # pragma: no cover\n pymongo = None # noqa\n\nif pymongo:\n try:\n from bson.binary import Binary\n except ImportError: # pragma: no cover\n from pymongo.binary import Binary # noqa\n from pymongo.errors import InvalidDocument # noqa\nelse: # pragma: no cover\n Binary = None # noqa\n\n class InvalidDocument(Exception): # noqa\n pass\n\n__all__ = ('MongoBackend',)\n\nBINARY_CODECS = frozenset(['pickle', 'msgpack'])\n\n\nclass MongoBackend(BaseBackend):\n \"\"\"MongoDB result backend.\n\n Raises:\n celery.exceptions.ImproperlyConfigured:\n if module :pypi:`pymongo` is not available.\n \"\"\"\n\n mongo_host = None\n host = 'localhost'\n port = 27017\n user = None\n password = None\n database_name = 'celery'\n taskmeta_collection = 'celery_taskmeta'\n groupmeta_collection = 'celery_groupmeta'\n max_pool_size = 10\n options = None\n\n supports_autoexpire = False\n\n _connection = None\n\n def __init__(self, app=None, **kwargs):\n self.options = {}\n\n super(MongoBackend, self).__init__(app, **kwargs)\n\n if not pymongo:\n raise ImproperlyConfigured(\n 'You need to install the pymongo library to use the '\n 'MongoDB backend.')\n\n # Set option defaults\n for key, value in items(self._prepare_client_options()):\n 
self.options.setdefault(key, value)\n\n # update conf with mongo uri data, only if uri was given\n if self.url:\n if self.url == 'mongodb://':\n self.url += 'localhost'\n\n uri_data = pymongo.uri_parser.parse_uri(self.url)\n # build the hosts list to create a mongo connection\n hostslist = [\n '{0}:{1}'.format(x[0], x[1]) for x in uri_data['nodelist']\n ]\n self.user = uri_data['username']\n self.password = uri_data['password']\n self.mongo_host = hostslist\n if uri_data['database']:\n # if no database is provided in the uri, use default\n self.database_name = uri_data['database']\n\n self.options.update(uri_data['options'])\n\n # update conf with specific settings\n config = self.app.conf.get('mongodb_backend_settings')\n if config is not None:\n if not isinstance(config, dict):\n raise ImproperlyConfigured(\n 'MongoDB backend settings should be grouped in a dict')\n config = dict(config) # don't modify original\n\n if 'host' in config or 'port' in config:\n # these should take over uri conf\n self.mongo_host = None\n\n self.host = config.pop('host', self.host)\n self.port = config.pop('port', self.port)\n self.mongo_host = config.pop('mongo_host', self.mongo_host)\n self.user = config.pop('user', self.user)\n self.password = config.pop('password', self.password)\n self.database_name = config.pop('database', self.database_name)\n self.taskmeta_collection = config.pop(\n 'taskmeta_collection', self.taskmeta_collection,\n )\n self.groupmeta_collection = config.pop(\n 'groupmeta_collection', self.groupmeta_collection,\n )\n\n self.options.update(config.pop('options', {}))\n self.options.update(config)\n\n def _prepare_client_options(self):\n if pymongo.version_tuple >= (3,):\n return {'maxPoolSize': self.max_pool_size}\n else: # pragma: no cover\n return {'max_pool_size': self.max_pool_size,\n 'auto_start_request': False}\n\n def _get_connection(self):\n \"\"\"Connect to the MongoDB server.\"\"\"\n if self._connection is None:\n from pymongo import MongoClient\n\n host = self.mongo_host\n if not host:\n # The first pymongo.Connection() argument (host) can be\n # a list of ['host:port'] elements or a mongodb connection\n # URI. 
If this is the case, don't use self.port\n # but let pymongo get the port(s) from the URI instead.\n # This enables the use of replica sets and sharding.\n # See pymongo.Connection() for more info.\n host = self.host\n if isinstance(host, string_t) \\\n and not host.startswith('mongodb://'):\n host = 'mongodb://{0}:{1}'.format(host, self.port)\n # don't change self.options\n conf = dict(self.options)\n conf['host'] = host\n\n self._connection = MongoClient(**conf)\n\n return self._connection\n\n def encode(self, data):\n if self.serializer == 'bson':\n # mongodb handles serialization\n return data\n payload = super(MongoBackend, self).encode(data)\n\n # serializer which are in a unsupported format (pickle/binary)\n if self.serializer in BINARY_CODECS:\n payload = Binary(payload)\n return payload\n\n def decode(self, data):\n if self.serializer == 'bson':\n return data\n return super(MongoBackend, self).decode(data)\n\n def _store_result(self, task_id, result, state,\n traceback=None, request=None, **kwargs):\n \"\"\"Store return value and state of an executed task.\"\"\"\n meta = {\n '_id': task_id,\n 'status': state,\n 'result': self.encode(result),\n 'date_done': datetime.utcnow(),\n 'traceback': self.encode(traceback),\n 'children': self.encode(\n self.current_task_children(request),\n ),\n }\n if request and getattr(request, 'parent_id', None):\n meta['parent_id'] = request.parent_id\n\n try:\n self.collection.save(meta)\n except InvalidDocument as exc:\n raise EncodeError(exc)\n\n return result\n\n def _get_task_meta_for(self, task_id):\n \"\"\"Get task meta-data for a task by id.\"\"\"\n obj = self.collection.find_one({'_id': task_id})\n if obj:\n return self.meta_from_decoded({\n 'task_id': obj['_id'],\n 'status': obj['status'],\n 'result': self.decode(obj['result']),\n 'date_done': obj['date_done'],\n 'traceback': self.decode(obj['traceback']),\n 'children': self.decode(obj['children']),\n })\n return {'status': states.PENDING, 'result': None}\n\n def _save_group(self, group_id, result):\n \"\"\"Save the group result.\"\"\"\n self.group_collection.save({\n '_id': group_id,\n 'result': self.encode([i.id for i in result]),\n 'date_done': datetime.utcnow(),\n })\n return result\n\n def _restore_group(self, group_id):\n \"\"\"Get the result for a group by id.\"\"\"\n obj = self.group_collection.find_one({'_id': group_id})\n if obj:\n return {\n 'task_id': obj['_id'],\n 'date_done': obj['date_done'],\n 'result': [\n self.app.AsyncResult(task)\n for task in self.decode(obj['result'])\n ],\n }\n\n def _delete_group(self, group_id):\n \"\"\"Delete a group by id.\"\"\"\n self.group_collection.remove({'_id': group_id})\n\n def _forget(self, task_id):\n \"\"\"Remove result from MongoDB.\n\n Raises:\n pymongo.exceptions.OperationsError:\n if the task_id could not be removed.\n \"\"\"\n # By using safe=True, this will wait until it receives a response from\n # the server. 
Likewise, it will raise an OperationsError if the\n # response was unable to be completed.\n self.collection.remove({'_id': task_id})\n\n def cleanup(self):\n \"\"\"Delete expired meta-data.\"\"\"\n self.collection.remove(\n {'date_done': {'$lt': self.app.now() - self.expires_delta}},\n )\n self.group_collection.remove(\n {'date_done': {'$lt': self.app.now() - self.expires_delta}},\n )\n\n def __reduce__(self, args=(), kwargs={}):\n return super(MongoBackend, self).__reduce__(\n args, dict(kwargs, expires=self.expires, url=self.url))\n\n def _get_database(self):\n conn = self._get_connection()\n db = conn[self.database_name]\n if self.user and self.password:\n if not db.authenticate(self.user, self.password):\n raise ImproperlyConfigured(\n 'Invalid MongoDB username or password.')\n return db\n\n @cached_property\n def database(self):\n \"\"\"Get database from MongoDB connection.\n\n performs authentication if necessary.\n \"\"\"\n return self._get_database()\n\n @cached_property\n def collection(self):\n \"\"\"Get the meta-data task collection.\"\"\"\n collection = self.database[self.taskmeta_collection]\n\n # Ensure an index on date_done is there, if not process the index\n # in the background. Once completed cleanup will be much faster\n collection.ensure_index('date_done', background='true')\n return collection\n\n @cached_property\n def group_collection(self):\n \"\"\"Get the meta-data task collection.\"\"\"\n collection = self.database[self.groupmeta_collection]\n\n # Ensure an index on date_done is there, if not process the index\n # in the background. Once completed cleanup will be much faster\n collection.ensure_index('date_done', background='true')\n return collection\n\n @cached_property\n def expires_delta(self):\n return timedelta(seconds=self.expires)\n\n def as_uri(self, include_password=False):\n \"\"\"Return the backend as an URI.\n\n Arguments:\n include_password (bool): Password censored if disabled.\n \"\"\"\n if not self.url:\n return 'mongodb://'\n if include_password:\n return self.url\n\n if ',' not in self.url:\n return maybe_sanitize_url(self.url)\n\n uri1, remainder = self.url.split(',', 1)\n return ','.join([maybe_sanitize_url(uri1), remainder])\n", "path": "celery/backends/mongodb.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"MongoDB result store backend.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nfrom datetime import datetime, timedelta\n\nfrom kombu.exceptions import EncodeError\nfrom kombu.utils.objects import cached_property\nfrom kombu.utils.url import maybe_sanitize_url, urlparse\n\nfrom celery import states\nfrom celery.exceptions import ImproperlyConfigured\nfrom celery.five import items, string_t\n\nfrom .base import BaseBackend\n\ntry:\n import pymongo\nexcept ImportError: # pragma: no cover\n pymongo = None # noqa\n\nif pymongo:\n try:\n from bson.binary import Binary\n except ImportError: # pragma: no cover\n from pymongo.binary import Binary # noqa\n from pymongo.errors import InvalidDocument # noqa\nelse: # pragma: no cover\n Binary = None # noqa\n\n class InvalidDocument(Exception): # noqa\n pass\n\n__all__ = ('MongoBackend',)\n\nBINARY_CODECS = frozenset(['pickle', 'msgpack'])\n\n\nclass MongoBackend(BaseBackend):\n \"\"\"MongoDB result backend.\n\n Raises:\n celery.exceptions.ImproperlyConfigured:\n if module :pypi:`pymongo` is not available.\n \"\"\"\n\n mongo_host = None\n host = 'localhost'\n port = 27017\n user = None\n password = None\n database_name = 'celery'\n taskmeta_collection = 
'celery_taskmeta'\n groupmeta_collection = 'celery_groupmeta'\n max_pool_size = 10\n options = None\n\n supports_autoexpire = False\n\n _connection = None\n\n def __init__(self, app=None, **kwargs):\n self.options = {}\n\n super(MongoBackend, self).__init__(app, **kwargs)\n\n if not pymongo:\n raise ImproperlyConfigured(\n 'You need to install the pymongo library to use the '\n 'MongoDB backend.')\n\n # Set option defaults\n for key, value in items(self._prepare_client_options()):\n self.options.setdefault(key, value)\n\n # update conf with mongo uri data, only if uri was given\n if self.url:\n self.url = self._ensure_mongodb_uri_compliance(self.url)\n\n uri_data = pymongo.uri_parser.parse_uri(self.url)\n # build the hosts list to create a mongo connection\n hostslist = [\n '{0}:{1}'.format(x[0], x[1]) for x in uri_data['nodelist']\n ]\n self.user = uri_data['username']\n self.password = uri_data['password']\n self.mongo_host = hostslist\n if uri_data['database']:\n # if no database is provided in the uri, use default\n self.database_name = uri_data['database']\n\n self.options.update(uri_data['options'])\n\n # update conf with specific settings\n config = self.app.conf.get('mongodb_backend_settings')\n if config is not None:\n if not isinstance(config, dict):\n raise ImproperlyConfigured(\n 'MongoDB backend settings should be grouped in a dict')\n config = dict(config) # don't modify original\n\n if 'host' in config or 'port' in config:\n # these should take over uri conf\n self.mongo_host = None\n\n self.host = config.pop('host', self.host)\n self.port = config.pop('port', self.port)\n self.mongo_host = config.pop('mongo_host', self.mongo_host)\n self.user = config.pop('user', self.user)\n self.password = config.pop('password', self.password)\n self.database_name = config.pop('database', self.database_name)\n self.taskmeta_collection = config.pop(\n 'taskmeta_collection', self.taskmeta_collection,\n )\n self.groupmeta_collection = config.pop(\n 'groupmeta_collection', self.groupmeta_collection,\n )\n\n self.options.update(config.pop('options', {}))\n self.options.update(config)\n\n @staticmethod\n def _ensure_mongodb_uri_compliance(url):\n parsed_url = urlparse(url)\n if not parsed_url.scheme.startswith('mongodb'):\n url = 'mongodb+{}'.format(url)\n\n if url == 'mongodb://':\n url += 'localhost'\n\n return url\n\n def _prepare_client_options(self):\n if pymongo.version_tuple >= (3,):\n return {'maxPoolSize': self.max_pool_size}\n else: # pragma: no cover\n return {'max_pool_size': self.max_pool_size,\n 'auto_start_request': False}\n\n def _get_connection(self):\n \"\"\"Connect to the MongoDB server.\"\"\"\n if self._connection is None:\n from pymongo import MongoClient\n\n host = self.mongo_host\n if not host:\n # The first pymongo.Connection() argument (host) can be\n # a list of ['host:port'] elements or a mongodb connection\n # URI. 
If this is the case, don't use self.port\n # but let pymongo get the port(s) from the URI instead.\n # This enables the use of replica sets and sharding.\n # See pymongo.Connection() for more info.\n host = self.host\n if isinstance(host, string_t) \\\n and not host.startswith('mongodb://'):\n host = 'mongodb://{0}:{1}'.format(host, self.port)\n # don't change self.options\n conf = dict(self.options)\n conf['host'] = host\n\n self._connection = MongoClient(**conf)\n\n return self._connection\n\n def encode(self, data):\n if self.serializer == 'bson':\n # mongodb handles serialization\n return data\n payload = super(MongoBackend, self).encode(data)\n\n # serializer which are in a unsupported format (pickle/binary)\n if self.serializer in BINARY_CODECS:\n payload = Binary(payload)\n return payload\n\n def decode(self, data):\n if self.serializer == 'bson':\n return data\n return super(MongoBackend, self).decode(data)\n\n def _store_result(self, task_id, result, state,\n traceback=None, request=None, **kwargs):\n \"\"\"Store return value and state of an executed task.\"\"\"\n meta = {\n '_id': task_id,\n 'status': state,\n 'result': self.encode(result),\n 'date_done': datetime.utcnow(),\n 'traceback': self.encode(traceback),\n 'children': self.encode(\n self.current_task_children(request),\n ),\n }\n if request and getattr(request, 'parent_id', None):\n meta['parent_id'] = request.parent_id\n\n try:\n self.collection.save(meta)\n except InvalidDocument as exc:\n raise EncodeError(exc)\n\n return result\n\n def _get_task_meta_for(self, task_id):\n \"\"\"Get task meta-data for a task by id.\"\"\"\n obj = self.collection.find_one({'_id': task_id})\n if obj:\n return self.meta_from_decoded({\n 'task_id': obj['_id'],\n 'status': obj['status'],\n 'result': self.decode(obj['result']),\n 'date_done': obj['date_done'],\n 'traceback': self.decode(obj['traceback']),\n 'children': self.decode(obj['children']),\n })\n return {'status': states.PENDING, 'result': None}\n\n def _save_group(self, group_id, result):\n \"\"\"Save the group result.\"\"\"\n self.group_collection.save({\n '_id': group_id,\n 'result': self.encode([i.id for i in result]),\n 'date_done': datetime.utcnow(),\n })\n return result\n\n def _restore_group(self, group_id):\n \"\"\"Get the result for a group by id.\"\"\"\n obj = self.group_collection.find_one({'_id': group_id})\n if obj:\n return {\n 'task_id': obj['_id'],\n 'date_done': obj['date_done'],\n 'result': [\n self.app.AsyncResult(task)\n for task in self.decode(obj['result'])\n ],\n }\n\n def _delete_group(self, group_id):\n \"\"\"Delete a group by id.\"\"\"\n self.group_collection.remove({'_id': group_id})\n\n def _forget(self, task_id):\n \"\"\"Remove result from MongoDB.\n\n Raises:\n pymongo.exceptions.OperationsError:\n if the task_id could not be removed.\n \"\"\"\n # By using safe=True, this will wait until it receives a response from\n # the server. 
Likewise, it will raise an OperationsError if the\n # response was unable to be completed.\n self.collection.remove({'_id': task_id})\n\n def cleanup(self):\n \"\"\"Delete expired meta-data.\"\"\"\n self.collection.remove(\n {'date_done': {'$lt': self.app.now() - self.expires_delta}},\n )\n self.group_collection.remove(\n {'date_done': {'$lt': self.app.now() - self.expires_delta}},\n )\n\n def __reduce__(self, args=(), kwargs={}):\n return super(MongoBackend, self).__reduce__(\n args, dict(kwargs, expires=self.expires, url=self.url))\n\n def _get_database(self):\n conn = self._get_connection()\n db = conn[self.database_name]\n if self.user and self.password:\n if not db.authenticate(self.user, self.password):\n raise ImproperlyConfigured(\n 'Invalid MongoDB username or password.')\n return db\n\n @cached_property\n def database(self):\n \"\"\"Get database from MongoDB connection.\n\n performs authentication if necessary.\n \"\"\"\n return self._get_database()\n\n @cached_property\n def collection(self):\n \"\"\"Get the meta-data task collection.\"\"\"\n collection = self.database[self.taskmeta_collection]\n\n # Ensure an index on date_done is there, if not process the index\n # in the background. Once completed cleanup will be much faster\n collection.ensure_index('date_done', background='true')\n return collection\n\n @cached_property\n def group_collection(self):\n \"\"\"Get the meta-data task collection.\"\"\"\n collection = self.database[self.groupmeta_collection]\n\n # Ensure an index on date_done is there, if not process the index\n # in the background. Once completed cleanup will be much faster\n collection.ensure_index('date_done', background='true')\n return collection\n\n @cached_property\n def expires_delta(self):\n return timedelta(seconds=self.expires)\n\n def as_uri(self, include_password=False):\n \"\"\"Return the backend as an URI.\n\n Arguments:\n include_password (bool): Password censored if disabled.\n \"\"\"\n if not self.url:\n return 'mongodb://'\n if include_password:\n return self.url\n\n if ',' not in self.url:\n return maybe_sanitize_url(self.url)\n\n uri1, remainder = self.url.split(',', 1)\n return ','.join([maybe_sanitize_url(uri1), remainder])\n", "path": "celery/backends/mongodb.py"}]}
| 3,485 | 338 |
gh_patches_debug_1412
|
rasdani/github-patches
|
git_diff
|
mne-tools__mne-python-3718
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ENH?: News / updates
It seems like we should have a little news/updates section of one-liners on the website, including things like:
1. Release notifications
2. Upcoming MNE-Python workshops
3. Upcoming coding sprints
If people agree I can put some old ones (last couple of release dates), and we can add to it as announcement-worthy things come up.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/sphinxext/cited_mne.py`
Content:
```
1 #!/usr/bin/env python
2 """Parse google scholar -> rst for MNE citations.
3
4 Example usage::
5
6 $ cited_mne --backend selenium --clear
7
8 """
9
10 # Author: Mainak Jas <[email protected]>
11 # License : BSD 3-clause
12
13 # Parts of this code were copied from google_scholar_parser
14 # (https://github.com/carlosp420/google_scholar_parser)
15
16 import os
17 import os.path as op
18 import re
19 import time
20 import random
21 import requests
22
23 import numpy as np
24 from joblib import Memory
25 from BeautifulSoup import BeautifulSoup
26
27 from mne.externals.tempita import Template
28 from mne.commands.utils import get_optparser
29
30 # cache to avoid making too many calls to Google Scholar
31 cachedir = 'cachedir'
32 if not os.path.exists(cachedir):
33 os.mkdir(cachedir)
34 mem = Memory(cachedir=cachedir, verbose=2)
35
36 UA = ('Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.9.2.9) '
37 'Gecko/20100913 Firefox/3.6.9')
38
39 # ##### Templates for citations #####
40 html = (u""".. _cited
41
42 Publications from MNE users
43 ===========================
44
45 Papers citing MNE as extracted from Google Scholar (on %s).
46
47 """)
48
49 cite_template = Template(u"""
50 {{for ii, publication in enumerate(publications)}}
51 {{ii + 1}}. {{publication}}.
52 {{endfor}}
53
54 """)
55
56
57 def parse_soup_page(soup):
58 """Parse the page using BeautifulSoup.
59
60 Parameters
61 ----------
62 soup : instance of BeautifulSoup
63 The page to be parsed.
64
65 Returns
66 -------
67 titles : list
68 The article titles.
69 authors : list
70 The name of the authors.
71 links : list
72 Hyperlinks to the articles.
73 """
74 titles, authors, links = list(), list(), list()
75 for div in soup.findAll('div'):
76 if div.name == "div" and div.get('class') == "gs_ri":
77 links.append(div.a['href'])
78 div_pub = div.findAll('div')
79 for d in div_pub:
80 if d.name == 'div' and d.get('class') == 'gs_a':
81 authors.append(d.text)
82 titles.append(div.a.text)
83 return titles, authors, links
84
85
86 def get_total_citations(soup):
87 """Get total citations."""
88 results = soup.find('div', attrs={'id': 'gs_ab_md'}).contents[0]
89 matches = re.search("About\s(\d+)\s", results)
90 if matches:
91 hits = matches.groups()[0]
92 return hits
93
94
95 def _get_soup(url, backend='selenium'):
96 """Get BeautifulSoup object from url.
97
98 Parameters
99 ----------
100 url : str
101 The url to fetch.
102 backend : 'selenium' | 'requests'
103 Use selenium by default because google can ask for captcha. For
104 'selenium' backend Firefox must be installed.
105
106 Returns
107 -------
108 soup : instance of BeautifulSoup
109 The soup page from the url.
110 """
111 if backend == 'requests':
112 req = requests.get(url, headers={'User-Agent': UA})
113 html_doc = req.text
114 soup = BeautifulSoup(html_doc)
115 if soup.find('div', attrs={'id': 'gs_ab_md'}) is None:
116 print('Falling back on to selenium backend due to captcha.')
117 backend = 'selenium'
118
119 if backend == 'selenium':
120 from selenium import webdriver
121 import selenium.webdriver.support.ui as ui
122
123 driver = webdriver.Firefox()
124 # give enough time to solve captcha.
125 wait = ui.WebDriverWait(driver, 200)
126
127 driver.get(url)
128 wait.until(lambda driver: driver.find_elements_by_id('gs_ab_md'))
129
130 html_doc = driver.page_source
131 soup = BeautifulSoup(html_doc)
132 driver.close()
133
134 return soup
135
136
137 @mem.cache
138 def get_citing_articles(cites_url, backend):
139 """Get the citing articles.
140
141 Parameters
142 ----------
143 cites_url: str
144 A citation url from Google Scholar.
145 backend : 'selenium' | 'requests'
146 Use selenium by default because google can ask for captcha. For
147 'selenium' backend Firefox must be installed.
148
149
150 Returns
151 -------
152 titles : list
153 The article titles.
154 authors : list
155 The name of the authors.
156 links : list
157 Hyperlinks to the articles.
158 """
159 n = random.random() * 5
160 time.sleep(n)
161 print("\nSleeping: {0} seconds".format(n))
162
163 # GS seems to allow only 20 hits per page!
164 cites_url += "&num=20"
165 soup = _get_soup(cites_url, backend=backend)
166 hits = get_total_citations(soup)
167 print("Got a total of {0} citations".format(hits))
168
169 hits = int(hits)
170 index = 0
171 titles, authors, links = list(), list(), list()
172 while hits > 1:
173 n = random.random() * 2
174 time.sleep(n)
175 if index > 0:
176 url = cites_url + "&start=" + str(index)
177 else:
178 url = cites_url
179 index += 20
180 hits -= 20
181 print("{0} more citations to process".format(hits))
182 soup = soup = _get_soup(url, backend=backend)
183 title, author, link = parse_soup_page(soup)
184 for this_title, this_author, this_link in zip(title, author, link):
185 titles.append(this_title)
186 authors.append(this_author)
187 links.append(this_link)
188
189 return titles, authors, links
190
191 if __name__ == '__main__':
192 parser = get_optparser(__file__)
193 parser.add_option("-c", "--clear", dest="clear", action='store_true',
194 help="if True, clear the cache.", default=False)
195 parser.add_option("-b", "--backend", dest="backend",
196 help="backend for parsing (selenium | requests)",
197 default='requests')
198 options, args = parser.parse_args()
199 backend, clear = options.backend, options.clear
200
201 if clear:
202 mem.clear()
203
204 random.seed()
205 gen_date = time.strftime("%B %d, %Y")
206 html = html % gen_date
207
208 url_tails = ['1521584321377182930', '12188330066413208874']
209 papers = ['MEG and EEG data analysis with MNE-Python',
210 'MNE software for processing MEG and EEG data']
211
212 publications = list()
213 for url_tail, paper in zip(url_tails, papers):
214 titles, authors, links = get_citing_articles(
215 'https://scholar.google.co.in/scholar?cites=%s'
216 % url_tail, backend=backend)
217
218 this_publication = list()
219 for ii in range(len(titles)):
220 pub = '`%s. <%s>`_. %s' % (titles[ii], links[ii], authors[ii])
221 this_publication.append(pub)
222
223 this_publication = [p.encode('utf8') for p in this_publication]
224 publications.append(this_publication)
225
226 # get a union of the citations for the two papers, sorted in
227 # alphabetic order
228 publications = np.union1d(publications[1], publications[0]).tolist()
229
230 # sort by year of publication
231 years = list()
232 for pub in publications:
233 m = re.search('\d{4} -', pub)
234 if m is None:
235 years.append(-1)
236 else:
237 years.append(int(m.group(0)[:-2]))
238 order = np.argsort(years)[::-1]
239 publications = [publications[idx] for idx in order]
240
241 # filter out publications not containing (http://, https://, ftp://)
242 publications = [p for p in publications if
243 any(sub in p for sub in ('http://', 'https://', 'ftp://'))]
244
245 # create rst & cleanup
246 this_html = cite_template.substitute(publications=publications)
247 this_html = this_html.replace('…', '...')
248 html += this_html
249
250 # output an rst file
251 with open(op.join('..', 'cited.rst'), 'w') as f:
252 f.write(html.encode('utf8'))
253
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/doc/sphinxext/cited_mne.py b/doc/sphinxext/cited_mne.py
--- a/doc/sphinxext/cited_mne.py
+++ b/doc/sphinxext/cited_mne.py
@@ -39,8 +39,8 @@
# ##### Templates for citations #####
html = (u""".. _cited
-Publications from MNE users
-===========================
+Publications by users
+=====================
Papers citing MNE as extracted from Google Scholar (on %s).
|
{"golden_diff": "diff --git a/doc/sphinxext/cited_mne.py b/doc/sphinxext/cited_mne.py\n--- a/doc/sphinxext/cited_mne.py\n+++ b/doc/sphinxext/cited_mne.py\n@@ -39,8 +39,8 @@\n # ##### Templates for citations #####\n html = (u\"\"\".. _cited\n \n-Publications from MNE users\n-===========================\n+Publications by users\n+=====================\n \n Papers citing MNE as extracted from Google Scholar (on %s).\n", "issue": "ENH?: News / updates\nIt seems like we should have a little news/updates section of one-liners on the website, including things like:\n1. Release notifications\n2. Upcoming MNE-Python workshops\n3. Upcoming coding sprints\n\nIf people agree I can put some old ones (last couple of release dates), and we can add to it as announcement-worthy things come up.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"Parse google scholar -> rst for MNE citations.\n\nExample usage::\n\n $ cited_mne --backend selenium --clear\n\n\"\"\"\n\n# Author: Mainak Jas <[email protected]>\n# License : BSD 3-clause\n\n# Parts of this code were copied from google_scholar_parser\n# (https://github.com/carlosp420/google_scholar_parser)\n\nimport os\nimport os.path as op\nimport re\nimport time\nimport random\nimport requests\n\nimport numpy as np\nfrom joblib import Memory\nfrom BeautifulSoup import BeautifulSoup\n\nfrom mne.externals.tempita import Template\nfrom mne.commands.utils import get_optparser\n\n# cache to avoid making too many calls to Google Scholar\ncachedir = 'cachedir'\nif not os.path.exists(cachedir):\n os.mkdir(cachedir)\nmem = Memory(cachedir=cachedir, verbose=2)\n\nUA = ('Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.9.2.9) '\n 'Gecko/20100913 Firefox/3.6.9')\n\n# ##### Templates for citations #####\nhtml = (u\"\"\".. _cited\n\nPublications from MNE users\n===========================\n\nPapers citing MNE as extracted from Google Scholar (on %s).\n\n\"\"\")\n\ncite_template = Template(u\"\"\"\n{{for ii, publication in enumerate(publications)}}\n{{ii + 1}}. {{publication}}.\n{{endfor}}\n\n\"\"\")\n\n\ndef parse_soup_page(soup):\n \"\"\"Parse the page using BeautifulSoup.\n\n Parameters\n ----------\n soup : instance of BeautifulSoup\n The page to be parsed.\n\n Returns\n -------\n titles : list\n The article titles.\n authors : list\n The name of the authors.\n links : list\n Hyperlinks to the articles.\n \"\"\"\n titles, authors, links = list(), list(), list()\n for div in soup.findAll('div'):\n if div.name == \"div\" and div.get('class') == \"gs_ri\":\n links.append(div.a['href'])\n div_pub = div.findAll('div')\n for d in div_pub:\n if d.name == 'div' and d.get('class') == 'gs_a':\n authors.append(d.text)\n titles.append(div.a.text)\n return titles, authors, links\n\n\ndef get_total_citations(soup):\n \"\"\"Get total citations.\"\"\"\n results = soup.find('div', attrs={'id': 'gs_ab_md'}).contents[0]\n matches = re.search(\"About\\s(\\d+)\\s\", results)\n if matches:\n hits = matches.groups()[0]\n return hits\n\n\ndef _get_soup(url, backend='selenium'):\n \"\"\"Get BeautifulSoup object from url.\n\n Parameters\n ----------\n url : str\n The url to fetch.\n backend : 'selenium' | 'requests'\n Use selenium by default because google can ask for captcha. 
For\n 'selenium' backend Firefox must be installed.\n\n Returns\n -------\n soup : instance of BeautifulSoup\n The soup page from the url.\n \"\"\"\n if backend == 'requests':\n req = requests.get(url, headers={'User-Agent': UA})\n html_doc = req.text\n soup = BeautifulSoup(html_doc)\n if soup.find('div', attrs={'id': 'gs_ab_md'}) is None:\n print('Falling back on to selenium backend due to captcha.')\n backend = 'selenium'\n\n if backend == 'selenium':\n from selenium import webdriver\n import selenium.webdriver.support.ui as ui\n\n driver = webdriver.Firefox()\n # give enough time to solve captcha.\n wait = ui.WebDriverWait(driver, 200)\n\n driver.get(url)\n wait.until(lambda driver: driver.find_elements_by_id('gs_ab_md'))\n\n html_doc = driver.page_source\n soup = BeautifulSoup(html_doc)\n driver.close()\n\n return soup\n\n\[email protected]\ndef get_citing_articles(cites_url, backend):\n \"\"\"Get the citing articles.\n\n Parameters\n ----------\n cites_url: str\n A citation url from Google Scholar.\n backend : 'selenium' | 'requests'\n Use selenium by default because google can ask for captcha. For\n 'selenium' backend Firefox must be installed.\n\n\n Returns\n -------\n titles : list\n The article titles.\n authors : list\n The name of the authors.\n links : list\n Hyperlinks to the articles.\n \"\"\"\n n = random.random() * 5\n time.sleep(n)\n print(\"\\nSleeping: {0} seconds\".format(n))\n\n # GS seems to allow only 20 hits per page!\n cites_url += \"&num=20\"\n soup = _get_soup(cites_url, backend=backend)\n hits = get_total_citations(soup)\n print(\"Got a total of {0} citations\".format(hits))\n\n hits = int(hits)\n index = 0\n titles, authors, links = list(), list(), list()\n while hits > 1:\n n = random.random() * 2\n time.sleep(n)\n if index > 0:\n url = cites_url + \"&start=\" + str(index)\n else:\n url = cites_url\n index += 20\n hits -= 20\n print(\"{0} more citations to process\".format(hits))\n soup = soup = _get_soup(url, backend=backend)\n title, author, link = parse_soup_page(soup)\n for this_title, this_author, this_link in zip(title, author, link):\n titles.append(this_title)\n authors.append(this_author)\n links.append(this_link)\n\n return titles, authors, links\n\nif __name__ == '__main__':\n parser = get_optparser(__file__)\n parser.add_option(\"-c\", \"--clear\", dest=\"clear\", action='store_true',\n help=\"if True, clear the cache.\", default=False)\n parser.add_option(\"-b\", \"--backend\", dest=\"backend\",\n help=\"backend for parsing (selenium | requests)\",\n default='requests')\n options, args = parser.parse_args()\n backend, clear = options.backend, options.clear\n\n if clear:\n mem.clear()\n\n random.seed()\n gen_date = time.strftime(\"%B %d, %Y\")\n html = html % gen_date\n\n url_tails = ['1521584321377182930', '12188330066413208874']\n papers = ['MEG and EEG data analysis with MNE-Python',\n 'MNE software for processing MEG and EEG data']\n\n publications = list()\n for url_tail, paper in zip(url_tails, papers):\n titles, authors, links = get_citing_articles(\n 'https://scholar.google.co.in/scholar?cites=%s'\n % url_tail, backend=backend)\n\n this_publication = list()\n for ii in range(len(titles)):\n pub = '`%s. <%s>`_. 
%s' % (titles[ii], links[ii], authors[ii])\n this_publication.append(pub)\n\n this_publication = [p.encode('utf8') for p in this_publication]\n publications.append(this_publication)\n\n # get a union of the citations for the two papers, sorted in\n # alphabetic order\n publications = np.union1d(publications[1], publications[0]).tolist()\n\n # sort by year of publication\n years = list()\n for pub in publications:\n m = re.search('\\d{4} -', pub)\n if m is None:\n years.append(-1)\n else:\n years.append(int(m.group(0)[:-2]))\n order = np.argsort(years)[::-1]\n publications = [publications[idx] for idx in order]\n\n # filter out publications not containing (http://, https://, ftp://)\n publications = [p for p in publications if\n any(sub in p for sub in ('http://', 'https://', 'ftp://'))]\n\n # create rst & cleanup\n this_html = cite_template.substitute(publications=publications)\n this_html = this_html.replace('…', '...')\n html += this_html\n\n # output an rst file\n with open(op.join('..', 'cited.rst'), 'w') as f:\n f.write(html.encode('utf8'))\n", "path": "doc/sphinxext/cited_mne.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"Parse google scholar -> rst for MNE citations.\n\nExample usage::\n\n $ cited_mne --backend selenium --clear\n\n\"\"\"\n\n# Author: Mainak Jas <[email protected]>\n# License : BSD 3-clause\n\n# Parts of this code were copied from google_scholar_parser\n# (https://github.com/carlosp420/google_scholar_parser)\n\nimport os\nimport os.path as op\nimport re\nimport time\nimport random\nimport requests\n\nimport numpy as np\nfrom joblib import Memory\nfrom BeautifulSoup import BeautifulSoup\n\nfrom mne.externals.tempita import Template\nfrom mne.commands.utils import get_optparser\n\n# cache to avoid making too many calls to Google Scholar\ncachedir = 'cachedir'\nif not os.path.exists(cachedir):\n os.mkdir(cachedir)\nmem = Memory(cachedir=cachedir, verbose=2)\n\nUA = ('Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.9.2.9) '\n 'Gecko/20100913 Firefox/3.6.9')\n\n# ##### Templates for citations #####\nhtml = (u\"\"\".. _cited\n\nPublications by users\n=====================\n\nPapers citing MNE as extracted from Google Scholar (on %s).\n\n\"\"\")\n\ncite_template = Template(u\"\"\"\n{{for ii, publication in enumerate(publications)}}\n{{ii + 1}}. {{publication}}.\n{{endfor}}\n\n\"\"\")\n\n\ndef parse_soup_page(soup):\n \"\"\"Parse the page using BeautifulSoup.\n\n Parameters\n ----------\n soup : instance of BeautifulSoup\n The page to be parsed.\n\n Returns\n -------\n titles : list\n The article titles.\n authors : list\n The name of the authors.\n links : list\n Hyperlinks to the articles.\n \"\"\"\n titles, authors, links = list(), list(), list()\n for div in soup.findAll('div'):\n if div.name == \"div\" and div.get('class') == \"gs_ri\":\n links.append(div.a['href'])\n div_pub = div.findAll('div')\n for d in div_pub:\n if d.name == 'div' and d.get('class') == 'gs_a':\n authors.append(d.text)\n titles.append(div.a.text)\n return titles, authors, links\n\n\ndef get_total_citations(soup):\n \"\"\"Get total citations.\"\"\"\n results = soup.find('div', attrs={'id': 'gs_ab_md'}).contents[0]\n matches = re.search(\"About\\s(\\d+)\\s\", results)\n if matches:\n hits = matches.groups()[0]\n return hits\n\n\ndef _get_soup(url, backend='selenium'):\n \"\"\"Get BeautifulSoup object from url.\n\n Parameters\n ----------\n url : str\n The url to fetch.\n backend : 'selenium' | 'requests'\n Use selenium by default because google can ask for captcha. 
For\n 'selenium' backend Firefox must be installed.\n\n Returns\n -------\n soup : instance of BeautifulSoup\n The soup page from the url.\n \"\"\"\n if backend == 'requests':\n req = requests.get(url, headers={'User-Agent': UA})\n html_doc = req.text\n soup = BeautifulSoup(html_doc)\n if soup.find('div', attrs={'id': 'gs_ab_md'}) is None:\n print('Falling back on to selenium backend due to captcha.')\n backend = 'selenium'\n\n if backend == 'selenium':\n from selenium import webdriver\n import selenium.webdriver.support.ui as ui\n\n driver = webdriver.Firefox()\n # give enough time to solve captcha.\n wait = ui.WebDriverWait(driver, 200)\n\n driver.get(url)\n wait.until(lambda driver: driver.find_elements_by_id('gs_ab_md'))\n\n html_doc = driver.page_source\n soup = BeautifulSoup(html_doc)\n driver.close()\n\n return soup\n\n\[email protected]\ndef get_citing_articles(cites_url, backend):\n \"\"\"Get the citing articles.\n\n Parameters\n ----------\n cites_url: str\n A citation url from Google Scholar.\n backend : 'selenium' | 'requests'\n Use selenium by default because google can ask for captcha. For\n 'selenium' backend Firefox must be installed.\n\n\n Returns\n -------\n titles : list\n The article titles.\n authors : list\n The name of the authors.\n links : list\n Hyperlinks to the articles.\n \"\"\"\n n = random.random() * 5\n time.sleep(n)\n print(\"\\nSleeping: {0} seconds\".format(n))\n\n # GS seems to allow only 20 hits per page!\n cites_url += \"&num=20\"\n soup = _get_soup(cites_url, backend=backend)\n hits = get_total_citations(soup)\n print(\"Got a total of {0} citations\".format(hits))\n\n hits = int(hits)\n index = 0\n titles, authors, links = list(), list(), list()\n while hits > 1:\n n = random.random() * 2\n time.sleep(n)\n if index > 0:\n url = cites_url + \"&start=\" + str(index)\n else:\n url = cites_url\n index += 20\n hits -= 20\n print(\"{0} more citations to process\".format(hits))\n soup = soup = _get_soup(url, backend=backend)\n title, author, link = parse_soup_page(soup)\n for this_title, this_author, this_link in zip(title, author, link):\n titles.append(this_title)\n authors.append(this_author)\n links.append(this_link)\n\n return titles, authors, links\n\nif __name__ == '__main__':\n parser = get_optparser(__file__)\n parser.add_option(\"-c\", \"--clear\", dest=\"clear\", action='store_true',\n help=\"if True, clear the cache.\", default=False)\n parser.add_option(\"-b\", \"--backend\", dest=\"backend\",\n help=\"backend for parsing (selenium | requests)\",\n default='requests')\n options, args = parser.parse_args()\n backend, clear = options.backend, options.clear\n\n if clear:\n mem.clear()\n\n random.seed()\n gen_date = time.strftime(\"%B %d, %Y\")\n html = html % gen_date\n\n url_tails = ['1521584321377182930', '12188330066413208874']\n papers = ['MEG and EEG data analysis with MNE-Python',\n 'MNE software for processing MEG and EEG data']\n\n publications = list()\n for url_tail, paper in zip(url_tails, papers):\n titles, authors, links = get_citing_articles(\n 'https://scholar.google.co.in/scholar?cites=%s'\n % url_tail, backend=backend)\n\n this_publication = list()\n for ii in range(len(titles)):\n pub = '`%s. <%s>`_. 
%s' % (titles[ii], links[ii], authors[ii])\n this_publication.append(pub)\n\n this_publication = [p.encode('utf8') for p in this_publication]\n publications.append(this_publication)\n\n # get a union of the citations for the two papers, sorted in\n # alphabetic order\n publications = np.union1d(publications[1], publications[0]).tolist()\n\n # sort by year of publication\n years = list()\n for pub in publications:\n m = re.search('\\d{4} -', pub)\n if m is None:\n years.append(-1)\n else:\n years.append(int(m.group(0)[:-2]))\n order = np.argsort(years)[::-1]\n publications = [publications[idx] for idx in order]\n\n # filter out publications not containing (http://, https://, ftp://)\n publications = [p for p in publications if\n any(sub in p for sub in ('http://', 'https://', 'ftp://'))]\n\n # create rst & cleanup\n this_html = cite_template.substitute(publications=publications)\n this_html = this_html.replace('…', '...')\n html += this_html\n\n # output an rst file\n with open(op.join('..', 'cited.rst'), 'w') as f:\n f.write(html.encode('utf8'))\n", "path": "doc/sphinxext/cited_mne.py"}]}
| 2,869 | 112 |
gh_patches_debug_16857
|
rasdani/github-patches
|
git_diff
|
cloud-custodian__cloud-custodian-4882
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
c7n_mailer, AWS not installing Lambda, no logs, no errors
I have tried to set up/install the c7n_mailer lambda on our AWS account according to the docs. I have tried it from my Mac and from Docker images (in a Jenkins pipeline) to no avail. The kicker is that I am not getting any error or output. Is there anything I can look at to see whether the issue is on my end or something on the AWS account? This is the command I am running:
```
c7n-mailer --config mailer.yml --update-lambda
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/c7n_mailer/c7n_mailer/deploy.py`
Content:
```
1 # Copyright 2016-2017 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from __future__ import absolute_import, division, print_function, unicode_literals
15
16 import copy
17 import json
18 import os
19
20 from c7n.mu import (
21 CloudWatchEventSource,
22 LambdaFunction,
23 LambdaManager,
24 PythonPackageArchive)
25
26
27 entry_source = """\
28 import logging
29
30 from c7n_mailer import handle
31
32 logger = logging.getLogger('custodian.mailer')
33 log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
34 logging.basicConfig(level=logging.INFO, format=log_format)
35 logging.getLogger('botocore').setLevel(logging.WARNING)
36
37 def dispatch(event, context):
38 return handle.start_c7n_mailer(logger)
39 """
40
41
42 def get_archive(config):
43 archive = PythonPackageArchive(modules=[
44 'c7n_mailer',
45 # core deps
46 'jinja2', 'markupsafe', 'ruamel', 'ldap3', 'pyasn1', 'redis',
47 # for other dependencies
48 'pkg_resources',
49 # transport datadog - recursive deps
50 'datadog', 'simplejson', 'decorator',
51 # requests (recursive deps), needed by datadog, slackclient, splunk
52 'requests', 'urllib3', 'idna', 'chardet', 'certifi',
53 # used by splunk; also dependencies of c7n itself
54 'jsonpointer', 'jsonpatch'])
55
56 for d in set(config['templates_folders']):
57 if not os.path.exists(d):
58 continue
59 for t in [f for f in os.listdir(d) if os.path.splitext(f)[1] == '.j2']:
60 with open(os.path.join(d, t)) as fh:
61 archive.add_contents('msg-templates/%s' % t, fh.read())
62
63 function_config = copy.deepcopy(config)
64 function_config['templates_folders'] = ['msg-templates/']
65 archive.add_contents('config.json', json.dumps(function_config))
66 archive.add_contents('periodic.py', entry_source)
67
68 archive.close()
69 return archive
70
71
72 def provision(config, session_factory):
73 func_config = dict(
74 name=config.get('lambda_name', 'cloud-custodian-mailer'),
75 description=config.get('lambda_description', 'Cloud Custodian Mailer'),
76 tags=config.get('lambda_tags', {}),
77 handler='periodic.dispatch',
78 runtime=config['runtime'],
79 memory_size=config['memory'],
80 timeout=config['timeout'],
81 role=config['role'],
82 subnets=config['subnets'],
83 security_groups=config['security_groups'],
84 dead_letter_config=config.get('dead_letter_config', {}),
85 events=[
86 CloudWatchEventSource(
87 {'type': 'periodic',
88 'schedule': config.get('lambda_schedule', 'rate(5 minutes)')},
89 session_factory)
90 ])
91
92 archive = get_archive(config)
93 func = LambdaFunction(func_config, archive)
94 manager = LambdaManager(session_factory)
95 manager.publish(func)
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tools/c7n_mailer/c7n_mailer/deploy.py b/tools/c7n_mailer/c7n_mailer/deploy.py
--- a/tools/c7n_mailer/c7n_mailer/deploy.py
+++ b/tools/c7n_mailer/c7n_mailer/deploy.py
@@ -14,6 +14,7 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
+import logging
import json
import os
@@ -24,6 +25,8 @@
PythonPackageArchive)
+log = logging.getLogger('custodian-mailer')
+
entry_source = """\
import logging
@@ -91,5 +94,6 @@
archive = get_archive(config)
func = LambdaFunction(func_config, archive)
+ log.info("Provisioning mailer lambda %s" % (session_factory().region_name))
manager = LambdaManager(session_factory)
manager.publish(func)
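Note on the patch above: it only adds an info-level log line before `LambdaManager` is built, so the silent-run symptom in the issue becomes visible once logging is configured. Below is a minimal sketch of driving the same `provision()` entry point directly with logging enabled; the `SessionFactory` import path, the region argument, and the assumption that `mailer.yml` already carries the keys `provision()` reads (`runtime`, `memory`, `timeout`, `role`, `templates_folders`, normally filled in by the CLI) are guesses about the installed Cloud Custodian version, not facts from the issue.

```python
# Hedged sketch: exercise c7n_mailer provisioning with visible logging.
# Assumptions: valid AWS credentials in the environment, a mailer.yml whose
# defaults (runtime, memory, timeout, role, templates_folders, ...) have
# already been filled in, and c7n's SessionFactory living at c7n.credentials.
import logging

import yaml
from c7n.credentials import SessionFactory

from c7n_mailer.deploy import provision

logging.basicConfig(level=logging.INFO)  # surfaces the new "Provisioning mailer lambda ..." line

with open('mailer.yml') as fh:
    config = yaml.safe_load(fh)

# 'us-east-1' is a placeholder region, not taken from the issue.
session_factory = SessionFactory(region=config.get('region', 'us-east-1'))
provision(config, session_factory)
```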
|
{"golden_diff": "diff --git a/tools/c7n_mailer/c7n_mailer/deploy.py b/tools/c7n_mailer/c7n_mailer/deploy.py\n--- a/tools/c7n_mailer/c7n_mailer/deploy.py\n+++ b/tools/c7n_mailer/c7n_mailer/deploy.py\n@@ -14,6 +14,7 @@\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n import copy\n+import logging\n import json\n import os\n \n@@ -24,6 +25,8 @@\n PythonPackageArchive)\n \n \n+log = logging.getLogger('custodian-mailer')\n+\n entry_source = \"\"\"\\\n import logging\n \n@@ -91,5 +94,6 @@\n \n archive = get_archive(config)\n func = LambdaFunction(func_config, archive)\n+ log.info(\"Provisioning mailer lambda %s\" % (session_factory().region_name))\n manager = LambdaManager(session_factory)\n manager.publish(func)\n", "issue": "c7n_mailer, AWS not installing Lambda, no logs, no errors\nI have tried to setup/install the c7n_mailer lambda on our AWS account according to the docs. I have tried it from my Mac and from Docker Images (in a Jenkins pipeline) to no avail. The kicker is I am not getting any error, or output. Is there anything I can look at to see if I have an issue from my end our something on the AWS account. This is the command I am running:\r\n```\r\nc7n-mailer --config mailer.yml --update-lambda\r\n```\n", "before_files": [{"content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport copy\nimport json\nimport os\n\nfrom c7n.mu import (\n CloudWatchEventSource,\n LambdaFunction,\n LambdaManager,\n PythonPackageArchive)\n\n\nentry_source = \"\"\"\\\nimport logging\n\nfrom c7n_mailer import handle\n\nlogger = logging.getLogger('custodian.mailer')\nlog_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\nlogging.basicConfig(level=logging.INFO, format=log_format)\nlogging.getLogger('botocore').setLevel(logging.WARNING)\n\ndef dispatch(event, context):\n return handle.start_c7n_mailer(logger)\n\"\"\"\n\n\ndef get_archive(config):\n archive = PythonPackageArchive(modules=[\n 'c7n_mailer',\n # core deps\n 'jinja2', 'markupsafe', 'ruamel', 'ldap3', 'pyasn1', 'redis',\n # for other dependencies\n 'pkg_resources',\n # transport datadog - recursive deps\n 'datadog', 'simplejson', 'decorator',\n # requests (recursive deps), needed by datadog, slackclient, splunk\n 'requests', 'urllib3', 'idna', 'chardet', 'certifi',\n # used by splunk; also dependencies of c7n itself\n 'jsonpointer', 'jsonpatch'])\n\n for d in set(config['templates_folders']):\n if not os.path.exists(d):\n continue\n for t in [f for f in os.listdir(d) if os.path.splitext(f)[1] == '.j2']:\n with open(os.path.join(d, t)) as fh:\n archive.add_contents('msg-templates/%s' % t, fh.read())\n\n function_config = copy.deepcopy(config)\n function_config['templates_folders'] = ['msg-templates/']\n archive.add_contents('config.json', json.dumps(function_config))\n archive.add_contents('periodic.py', entry_source)\n\n archive.close()\n return 
archive\n\n\ndef provision(config, session_factory):\n func_config = dict(\n name=config.get('lambda_name', 'cloud-custodian-mailer'),\n description=config.get('lambda_description', 'Cloud Custodian Mailer'),\n tags=config.get('lambda_tags', {}),\n handler='periodic.dispatch',\n runtime=config['runtime'],\n memory_size=config['memory'],\n timeout=config['timeout'],\n role=config['role'],\n subnets=config['subnets'],\n security_groups=config['security_groups'],\n dead_letter_config=config.get('dead_letter_config', {}),\n events=[\n CloudWatchEventSource(\n {'type': 'periodic',\n 'schedule': config.get('lambda_schedule', 'rate(5 minutes)')},\n session_factory)\n ])\n\n archive = get_archive(config)\n func = LambdaFunction(func_config, archive)\n manager = LambdaManager(session_factory)\n manager.publish(func)\n", "path": "tools/c7n_mailer/c7n_mailer/deploy.py"}], "after_files": [{"content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport copy\nimport logging\nimport json\nimport os\n\nfrom c7n.mu import (\n CloudWatchEventSource,\n LambdaFunction,\n LambdaManager,\n PythonPackageArchive)\n\n\nlog = logging.getLogger('custodian-mailer')\n\nentry_source = \"\"\"\\\nimport logging\n\nfrom c7n_mailer import handle\n\nlogger = logging.getLogger('custodian.mailer')\nlog_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\nlogging.basicConfig(level=logging.INFO, format=log_format)\nlogging.getLogger('botocore').setLevel(logging.WARNING)\n\ndef dispatch(event, context):\n return handle.start_c7n_mailer(logger)\n\"\"\"\n\n\ndef get_archive(config):\n archive = PythonPackageArchive(modules=[\n 'c7n_mailer',\n # core deps\n 'jinja2', 'markupsafe', 'ruamel', 'ldap3', 'pyasn1', 'redis',\n # for other dependencies\n 'pkg_resources',\n # transport datadog - recursive deps\n 'datadog', 'simplejson', 'decorator',\n # requests (recursive deps), needed by datadog, slackclient, splunk\n 'requests', 'urllib3', 'idna', 'chardet', 'certifi',\n # used by splunk; also dependencies of c7n itself\n 'jsonpointer', 'jsonpatch'])\n\n for d in set(config['templates_folders']):\n if not os.path.exists(d):\n continue\n for t in [f for f in os.listdir(d) if os.path.splitext(f)[1] == '.j2']:\n with open(os.path.join(d, t)) as fh:\n archive.add_contents('msg-templates/%s' % t, fh.read())\n\n function_config = copy.deepcopy(config)\n function_config['templates_folders'] = ['msg-templates/']\n archive.add_contents('config.json', json.dumps(function_config))\n archive.add_contents('periodic.py', entry_source)\n\n archive.close()\n return archive\n\n\ndef provision(config, session_factory):\n func_config = dict(\n name=config.get('lambda_name', 'cloud-custodian-mailer'),\n description=config.get('lambda_description', 'Cloud Custodian Mailer'),\n tags=config.get('lambda_tags', {}),\n handler='periodic.dispatch',\n runtime=config['runtime'],\n memory_size=config['memory'],\n 
timeout=config['timeout'],\n role=config['role'],\n subnets=config['subnets'],\n security_groups=config['security_groups'],\n dead_letter_config=config.get('dead_letter_config', {}),\n events=[\n CloudWatchEventSource(\n {'type': 'periodic',\n 'schedule': config.get('lambda_schedule', 'rate(5 minutes)')},\n session_factory)\n ])\n\n archive = get_archive(config)\n func = LambdaFunction(func_config, archive)\n log.info(\"Provisioning mailer lambda %s\" % (session_factory().region_name))\n manager = LambdaManager(session_factory)\n manager.publish(func)\n", "path": "tools/c7n_mailer/c7n_mailer/deploy.py"}]}
| 1,337 | 212 |
gh_patches_debug_5811
|
rasdani/github-patches
|
git_diff
|
dbt-labs__dbt-core-4878
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[CT-371] [Bug] CLI vars in packages.yml and selectors.yml don't work.
When we did the work to separate out rendering of secrets, a bug was introduced where instead of using cli_vars to construct the contexts for packages and selectors, we use the entire yaml context (which is also a dict). Because of this we get errors like: "Object of type method is not JSON serializable" and also vars are not found when rendering.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/dbt/config/renderer.py`
Content:
```
1 from typing import Dict, Any, Tuple, Optional, Union, Callable
2
3 from dbt.clients.jinja import get_rendered, catch_jinja
4 from dbt.context.target import TargetContext
5 from dbt.context.secret import SecretContext
6 from dbt.context.base import BaseContext
7 from dbt.contracts.connection import HasCredentials
8 from dbt.exceptions import DbtProjectError, CompilationException, RecursionException
9 from dbt.utils import deep_map_render
10
11
12 Keypath = Tuple[Union[str, int], ...]
13
14
15 class BaseRenderer:
16 def __init__(self, context: Dict[str, Any]) -> None:
17 self.context = context
18
19 @property
20 def name(self):
21 return "Rendering"
22
23 def should_render_keypath(self, keypath: Keypath) -> bool:
24 return True
25
26 def render_entry(self, value: Any, keypath: Keypath) -> Any:
27 if not self.should_render_keypath(keypath):
28 return value
29
30 return self.render_value(value, keypath)
31
32 def render_value(self, value: Any, keypath: Optional[Keypath] = None) -> Any:
33 # keypath is ignored.
34 # if it wasn't read as a string, ignore it
35 if not isinstance(value, str):
36 return value
37 try:
38 with catch_jinja():
39 return get_rendered(value, self.context, native=True)
40 except CompilationException as exc:
41 msg = f"Could not render {value}: {exc.msg}"
42 raise CompilationException(msg) from exc
43
44 def render_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
45 try:
46 return deep_map_render(self.render_entry, data)
47 except RecursionException:
48 raise DbtProjectError(
49 f"Cycle detected: {self.name} input has a reference to itself", project=data
50 )
51
52
53 def _list_if_none(value):
54 if value is None:
55 value = []
56 return value
57
58
59 def _dict_if_none(value):
60 if value is None:
61 value = {}
62 return value
63
64
65 def _list_if_none_or_string(value):
66 value = _list_if_none(value)
67 if isinstance(value, str):
68 return [value]
69 return value
70
71
72 class ProjectPostprocessor(Dict[Keypath, Callable[[Any], Any]]):
73 def __init__(self):
74 super().__init__()
75
76 self[("on-run-start",)] = _list_if_none_or_string
77 self[("on-run-end",)] = _list_if_none_or_string
78
79 for k in ("models", "seeds", "snapshots"):
80 self[(k,)] = _dict_if_none
81 self[(k, "vars")] = _dict_if_none
82 self[(k, "pre-hook")] = _list_if_none_or_string
83 self[(k, "post-hook")] = _list_if_none_or_string
84 self[("seeds", "column_types")] = _dict_if_none
85
86 def postprocess(self, value: Any, key: Keypath) -> Any:
87 if key in self:
88 handler = self[key]
89 return handler(value)
90
91 return value
92
93
94 class DbtProjectYamlRenderer(BaseRenderer):
95 _KEYPATH_HANDLERS = ProjectPostprocessor()
96
97 def __init__(
98 self, profile: Optional[HasCredentials] = None, cli_vars: Optional[Dict[str, Any]] = None
99 ) -> None:
100 # Generate contexts here because we want to save the context
101 # object in order to retrieve the env_vars. This is almost always
102 # a TargetContext, but in the debug task we want a project
103 # even when we don't have a profile.
104 if cli_vars is None:
105 cli_vars = {}
106 if profile:
107 self.ctx_obj = TargetContext(profile, cli_vars)
108 else:
109 self.ctx_obj = BaseContext(cli_vars) # type:ignore
110 context = self.ctx_obj.to_dict()
111 super().__init__(context)
112
113 @property
114 def name(self):
115 "Project config"
116
117 def get_package_renderer(self) -> BaseRenderer:
118 return PackageRenderer(self.context)
119
120 def get_selector_renderer(self) -> BaseRenderer:
121 return SelectorRenderer(self.context)
122
123 def render_project(
124 self,
125 project: Dict[str, Any],
126 project_root: str,
127 ) -> Dict[str, Any]:
128 """Render the project and insert the project root after rendering."""
129 rendered_project = self.render_data(project)
130 rendered_project["project-root"] = project_root
131 return rendered_project
132
133 def render_packages(self, packages: Dict[str, Any]):
134 """Render the given packages dict"""
135 package_renderer = self.get_package_renderer()
136 return package_renderer.render_data(packages)
137
138 def render_selectors(self, selectors: Dict[str, Any]):
139 selector_renderer = self.get_selector_renderer()
140 return selector_renderer.render_data(selectors)
141
142 def render_entry(self, value: Any, keypath: Keypath) -> Any:
143 result = super().render_entry(value, keypath)
144 return self._KEYPATH_HANDLERS.postprocess(result, keypath)
145
146 def should_render_keypath(self, keypath: Keypath) -> bool:
147 if not keypath:
148 return True
149
150 first = keypath[0]
151 # run hooks are not rendered
152 if first in {"on-run-start", "on-run-end", "query-comment"}:
153 return False
154
155 # don't render vars blocks until runtime
156 if first == "vars":
157 return False
158
159 if first in {"seeds", "models", "snapshots", "tests"}:
160 keypath_parts = {(k.lstrip("+ ") if isinstance(k, str) else k) for k in keypath}
161 # model-level hooks
162 if "pre-hook" in keypath_parts or "post-hook" in keypath_parts:
163 return False
164
165 return True
166
167
168 class SelectorRenderer(BaseRenderer):
169 @property
170 def name(self):
171 return "Selector config"
172
173
174 class SecretRenderer(BaseRenderer):
175 def __init__(self, cli_vars: Optional[Dict[str, Any]] = None) -> None:
176 # Generate contexts here because we want to save the context
177 # object in order to retrieve the env_vars.
178 if cli_vars is None:
179 cli_vars = {}
180 self.ctx_obj = SecretContext(cli_vars)
181 context = self.ctx_obj.to_dict()
182 super().__init__(context)
183
184 @property
185 def name(self):
186 return "Secret"
187
188
189 class ProfileRenderer(SecretRenderer):
190 @property
191 def name(self):
192 return "Profile"
193
194
195 class PackageRenderer(SecretRenderer):
196 @property
197 def name(self):
198 return "Packages config"
199
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/core/dbt/config/renderer.py b/core/dbt/config/renderer.py
--- a/core/dbt/config/renderer.py
+++ b/core/dbt/config/renderer.py
@@ -115,10 +115,10 @@
"Project config"
def get_package_renderer(self) -> BaseRenderer:
- return PackageRenderer(self.context)
+ return PackageRenderer(self.ctx_obj.cli_vars)
def get_selector_renderer(self) -> BaseRenderer:
- return SelectorRenderer(self.context)
+ return SelectorRenderer(self.ctx_obj.cli_vars)
def render_project(
self,
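Annotation on the fix above: `PackageRenderer` derives from `SecretRenderer`, whose constructor expects the raw `--vars` mapping and builds a fresh `SecretContext` from it, so handing it the fully built rendering context (a dict whose values include bound methods) plausibly produces the serialization error quoted in the issue and hides the actual vars. The snippet below only illustrates why the two dicts are not interchangeable; the context contents are invented for the example and are not dbt's real context.

```python
# Illustrative only: a rendering context carries callables, cli_vars is plain data.
import json

full_context = {
    "var": lambda name, default=None: default,  # stand-ins for jinja helper methods
    "env_var": lambda name: "",
    "target": {"name": "dev"},
}
cli_vars = {"deployment": "prod"}

try:
    json.dumps(full_context)   # what happens if the whole context leaks in
except TypeError as exc:
    print(exc)                 # e.g. "Object of type function is not JSON serializable"

print(json.dumps(cli_vars))    # the plain mapping SecretContext actually expects
```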
|
{"golden_diff": "diff --git a/core/dbt/config/renderer.py b/core/dbt/config/renderer.py\n--- a/core/dbt/config/renderer.py\n+++ b/core/dbt/config/renderer.py\n@@ -115,10 +115,10 @@\n \"Project config\"\n \n def get_package_renderer(self) -> BaseRenderer:\n- return PackageRenderer(self.context)\n+ return PackageRenderer(self.ctx_obj.cli_vars)\n \n def get_selector_renderer(self) -> BaseRenderer:\n- return SelectorRenderer(self.context)\n+ return SelectorRenderer(self.ctx_obj.cli_vars)\n \n def render_project(\n self,\n", "issue": "[CT-371] [Bug] CLI vars in packages.yml and selectors.yml don't work.\nWhen we did the work to separate out rendering of secrets, a bug was introduced where instead of using cli_vars to construct the contexts for packages and selectors, we use the entire yaml context (which is also a dict). Because of this we get errors like: \"Object of type method is not JSON serializable\" and also vars are not found when rendering.\n", "before_files": [{"content": "from typing import Dict, Any, Tuple, Optional, Union, Callable\n\nfrom dbt.clients.jinja import get_rendered, catch_jinja\nfrom dbt.context.target import TargetContext\nfrom dbt.context.secret import SecretContext\nfrom dbt.context.base import BaseContext\nfrom dbt.contracts.connection import HasCredentials\nfrom dbt.exceptions import DbtProjectError, CompilationException, RecursionException\nfrom dbt.utils import deep_map_render\n\n\nKeypath = Tuple[Union[str, int], ...]\n\n\nclass BaseRenderer:\n def __init__(self, context: Dict[str, Any]) -> None:\n self.context = context\n\n @property\n def name(self):\n return \"Rendering\"\n\n def should_render_keypath(self, keypath: Keypath) -> bool:\n return True\n\n def render_entry(self, value: Any, keypath: Keypath) -> Any:\n if not self.should_render_keypath(keypath):\n return value\n\n return self.render_value(value, keypath)\n\n def render_value(self, value: Any, keypath: Optional[Keypath] = None) -> Any:\n # keypath is ignored.\n # if it wasn't read as a string, ignore it\n if not isinstance(value, str):\n return value\n try:\n with catch_jinja():\n return get_rendered(value, self.context, native=True)\n except CompilationException as exc:\n msg = f\"Could not render {value}: {exc.msg}\"\n raise CompilationException(msg) from exc\n\n def render_data(self, data: Dict[str, Any]) -> Dict[str, Any]:\n try:\n return deep_map_render(self.render_entry, data)\n except RecursionException:\n raise DbtProjectError(\n f\"Cycle detected: {self.name} input has a reference to itself\", project=data\n )\n\n\ndef _list_if_none(value):\n if value is None:\n value = []\n return value\n\n\ndef _dict_if_none(value):\n if value is None:\n value = {}\n return value\n\n\ndef _list_if_none_or_string(value):\n value = _list_if_none(value)\n if isinstance(value, str):\n return [value]\n return value\n\n\nclass ProjectPostprocessor(Dict[Keypath, Callable[[Any], Any]]):\n def __init__(self):\n super().__init__()\n\n self[(\"on-run-start\",)] = _list_if_none_or_string\n self[(\"on-run-end\",)] = _list_if_none_or_string\n\n for k in (\"models\", \"seeds\", \"snapshots\"):\n self[(k,)] = _dict_if_none\n self[(k, \"vars\")] = _dict_if_none\n self[(k, \"pre-hook\")] = _list_if_none_or_string\n self[(k, \"post-hook\")] = _list_if_none_or_string\n self[(\"seeds\", \"column_types\")] = _dict_if_none\n\n def postprocess(self, value: Any, key: Keypath) -> Any:\n if key in self:\n handler = self[key]\n return handler(value)\n\n return value\n\n\nclass DbtProjectYamlRenderer(BaseRenderer):\n 
_KEYPATH_HANDLERS = ProjectPostprocessor()\n\n def __init__(\n self, profile: Optional[HasCredentials] = None, cli_vars: Optional[Dict[str, Any]] = None\n ) -> None:\n # Generate contexts here because we want to save the context\n # object in order to retrieve the env_vars. This is almost always\n # a TargetContext, but in the debug task we want a project\n # even when we don't have a profile.\n if cli_vars is None:\n cli_vars = {}\n if profile:\n self.ctx_obj = TargetContext(profile, cli_vars)\n else:\n self.ctx_obj = BaseContext(cli_vars) # type:ignore\n context = self.ctx_obj.to_dict()\n super().__init__(context)\n\n @property\n def name(self):\n \"Project config\"\n\n def get_package_renderer(self) -> BaseRenderer:\n return PackageRenderer(self.context)\n\n def get_selector_renderer(self) -> BaseRenderer:\n return SelectorRenderer(self.context)\n\n def render_project(\n self,\n project: Dict[str, Any],\n project_root: str,\n ) -> Dict[str, Any]:\n \"\"\"Render the project and insert the project root after rendering.\"\"\"\n rendered_project = self.render_data(project)\n rendered_project[\"project-root\"] = project_root\n return rendered_project\n\n def render_packages(self, packages: Dict[str, Any]):\n \"\"\"Render the given packages dict\"\"\"\n package_renderer = self.get_package_renderer()\n return package_renderer.render_data(packages)\n\n def render_selectors(self, selectors: Dict[str, Any]):\n selector_renderer = self.get_selector_renderer()\n return selector_renderer.render_data(selectors)\n\n def render_entry(self, value: Any, keypath: Keypath) -> Any:\n result = super().render_entry(value, keypath)\n return self._KEYPATH_HANDLERS.postprocess(result, keypath)\n\n def should_render_keypath(self, keypath: Keypath) -> bool:\n if not keypath:\n return True\n\n first = keypath[0]\n # run hooks are not rendered\n if first in {\"on-run-start\", \"on-run-end\", \"query-comment\"}:\n return False\n\n # don't render vars blocks until runtime\n if first == \"vars\":\n return False\n\n if first in {\"seeds\", \"models\", \"snapshots\", \"tests\"}:\n keypath_parts = {(k.lstrip(\"+ \") if isinstance(k, str) else k) for k in keypath}\n # model-level hooks\n if \"pre-hook\" in keypath_parts or \"post-hook\" in keypath_parts:\n return False\n\n return True\n\n\nclass SelectorRenderer(BaseRenderer):\n @property\n def name(self):\n return \"Selector config\"\n\n\nclass SecretRenderer(BaseRenderer):\n def __init__(self, cli_vars: Optional[Dict[str, Any]] = None) -> None:\n # Generate contexts here because we want to save the context\n # object in order to retrieve the env_vars.\n if cli_vars is None:\n cli_vars = {}\n self.ctx_obj = SecretContext(cli_vars)\n context = self.ctx_obj.to_dict()\n super().__init__(context)\n\n @property\n def name(self):\n return \"Secret\"\n\n\nclass ProfileRenderer(SecretRenderer):\n @property\n def name(self):\n return \"Profile\"\n\n\nclass PackageRenderer(SecretRenderer):\n @property\n def name(self):\n return \"Packages config\"\n", "path": "core/dbt/config/renderer.py"}], "after_files": [{"content": "from typing import Dict, Any, Tuple, Optional, Union, Callable\n\nfrom dbt.clients.jinja import get_rendered, catch_jinja\nfrom dbt.context.target import TargetContext\nfrom dbt.context.secret import SecretContext\nfrom dbt.context.base import BaseContext\nfrom dbt.contracts.connection import HasCredentials\nfrom dbt.exceptions import DbtProjectError, CompilationException, RecursionException\nfrom dbt.utils import deep_map_render\n\n\nKeypath = Tuple[Union[str, 
int], ...]\n\n\nclass BaseRenderer:\n def __init__(self, context: Dict[str, Any]) -> None:\n self.context = context\n\n @property\n def name(self):\n return \"Rendering\"\n\n def should_render_keypath(self, keypath: Keypath) -> bool:\n return True\n\n def render_entry(self, value: Any, keypath: Keypath) -> Any:\n if not self.should_render_keypath(keypath):\n return value\n\n return self.render_value(value, keypath)\n\n def render_value(self, value: Any, keypath: Optional[Keypath] = None) -> Any:\n # keypath is ignored.\n # if it wasn't read as a string, ignore it\n if not isinstance(value, str):\n return value\n try:\n with catch_jinja():\n return get_rendered(value, self.context, native=True)\n except CompilationException as exc:\n msg = f\"Could not render {value}: {exc.msg}\"\n raise CompilationException(msg) from exc\n\n def render_data(self, data: Dict[str, Any]) -> Dict[str, Any]:\n try:\n return deep_map_render(self.render_entry, data)\n except RecursionException:\n raise DbtProjectError(\n f\"Cycle detected: {self.name} input has a reference to itself\", project=data\n )\n\n\ndef _list_if_none(value):\n if value is None:\n value = []\n return value\n\n\ndef _dict_if_none(value):\n if value is None:\n value = {}\n return value\n\n\ndef _list_if_none_or_string(value):\n value = _list_if_none(value)\n if isinstance(value, str):\n return [value]\n return value\n\n\nclass ProjectPostprocessor(Dict[Keypath, Callable[[Any], Any]]):\n def __init__(self):\n super().__init__()\n\n self[(\"on-run-start\",)] = _list_if_none_or_string\n self[(\"on-run-end\",)] = _list_if_none_or_string\n\n for k in (\"models\", \"seeds\", \"snapshots\"):\n self[(k,)] = _dict_if_none\n self[(k, \"vars\")] = _dict_if_none\n self[(k, \"pre-hook\")] = _list_if_none_or_string\n self[(k, \"post-hook\")] = _list_if_none_or_string\n self[(\"seeds\", \"column_types\")] = _dict_if_none\n\n def postprocess(self, value: Any, key: Keypath) -> Any:\n if key in self:\n handler = self[key]\n return handler(value)\n\n return value\n\n\nclass DbtProjectYamlRenderer(BaseRenderer):\n _KEYPATH_HANDLERS = ProjectPostprocessor()\n\n def __init__(\n self, profile: Optional[HasCredentials] = None, cli_vars: Optional[Dict[str, Any]] = None\n ) -> None:\n # Generate contexts here because we want to save the context\n # object in order to retrieve the env_vars. 
This is almost always\n # a TargetContext, but in the debug task we want a project\n # even when we don't have a profile.\n if cli_vars is None:\n cli_vars = {}\n if profile:\n self.ctx_obj = TargetContext(profile, cli_vars)\n else:\n self.ctx_obj = BaseContext(cli_vars) # type:ignore\n context = self.ctx_obj.to_dict()\n super().__init__(context)\n\n @property\n def name(self):\n \"Project config\"\n\n def get_package_renderer(self) -> BaseRenderer:\n return PackageRenderer(self.ctx_obj.cli_vars)\n\n def get_selector_renderer(self) -> BaseRenderer:\n return SelectorRenderer(self.ctx_obj.cli_vars)\n\n def render_project(\n self,\n project: Dict[str, Any],\n project_root: str,\n ) -> Dict[str, Any]:\n \"\"\"Render the project and insert the project root after rendering.\"\"\"\n rendered_project = self.render_data(project)\n rendered_project[\"project-root\"] = project_root\n return rendered_project\n\n def render_packages(self, packages: Dict[str, Any]):\n \"\"\"Render the given packages dict\"\"\"\n package_renderer = self.get_package_renderer()\n return package_renderer.render_data(packages)\n\n def render_selectors(self, selectors: Dict[str, Any]):\n selector_renderer = self.get_selector_renderer()\n return selector_renderer.render_data(selectors)\n\n def render_entry(self, value: Any, keypath: Keypath) -> Any:\n result = super().render_entry(value, keypath)\n return self._KEYPATH_HANDLERS.postprocess(result, keypath)\n\n def should_render_keypath(self, keypath: Keypath) -> bool:\n if not keypath:\n return True\n\n first = keypath[0]\n # run hooks are not rendered\n if first in {\"on-run-start\", \"on-run-end\", \"query-comment\"}:\n return False\n\n # don't render vars blocks until runtime\n if first == \"vars\":\n return False\n\n if first in {\"seeds\", \"models\", \"snapshots\", \"tests\"}:\n keypath_parts = {(k.lstrip(\"+ \") if isinstance(k, str) else k) for k in keypath}\n # model-level hooks\n if \"pre-hook\" in keypath_parts or \"post-hook\" in keypath_parts:\n return False\n\n return True\n\n\nclass SelectorRenderer(BaseRenderer):\n @property\n def name(self):\n return \"Selector config\"\n\n\nclass SecretRenderer(BaseRenderer):\n def __init__(self, cli_vars: Optional[Dict[str, Any]] = None) -> None:\n # Generate contexts here because we want to save the context\n # object in order to retrieve the env_vars.\n if cli_vars is None:\n cli_vars = {}\n self.ctx_obj = SecretContext(cli_vars)\n context = self.ctx_obj.to_dict()\n super().__init__(context)\n\n @property\n def name(self):\n return \"Secret\"\n\n\nclass ProfileRenderer(SecretRenderer):\n @property\n def name(self):\n return \"Profile\"\n\n\nclass PackageRenderer(SecretRenderer):\n @property\n def name(self):\n return \"Packages config\"\n", "path": "core/dbt/config/renderer.py"}]}
| 2,305 | 128 |
gh_patches_debug_27777
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmpose-926
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
A dataset bug that makes topdown training very slow, wasting about 3 minutes every epoch
I found a dataset bug. I tested it on several servers (including one with 8 A100 GPUs and a 96-core CPU), and it happened on all of them. For every epoch, this bug wastes about 3 minutes. I can only locate the bug, but I don't know why it happens; it seems to happen only when launching distributed training.
Bug location: when you launch a topdown method, e.g. topdown_heatmap/coco/res50_coco_256x192.py, go to /mmcv/runner/epoch_based_runner.py, around line 48, where you will find this code:
```
self.call_hook('before_train_epoch')
time.sleep(2) # Prevent possible deadlock during epoch transition
for i, data_batch in enumerate(self.data_loader):
    self._inner_iter = i
```
At the beginning of every epoch, the `for i, data_batch in enumerate(self.data_loader):` line takes about 3 minutes, which makes the training very slow.
You can modify the original code into the code below to reproduce this issue; it only happens at the very beginning of each epoch.
```
self.call_hook('before_train_epoch')
time.sleep(2) # Prevent possible deadlock during epoch transition
print('before_train_epoch, time:{}'.format(time.time()-start_time))
start_time = time.time()
for i, data_batch in enumerate(self.data_loader):
    self._inner_iter = i
    print('before_train_iter_load_data, time:{}'.format(time.time()-start_time))
```
Here is my system information:
Python: 3.7.10 | packaged by conda-forge | (default, Feb 19 2021, 16:07:37) [GCC 9.3.0]
CUDA available: True
GPU 0,1,2,3,4,5,6,7: A100-SXM4-40GB
CUDA_HOME: /usr/local/cuda-11.1
NVCC: Build cuda_11.1.TC455_06.29190527_0
GCC: gcc (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0
PyTorch: 1.8.1+cu111
PyTorch compiling details: PyTorch built with:
- GCC 7.3
- C++ Version: 201402
- Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications
- Intel(R) MKL-DNN v1.7.0 (Git Hash 7aed236906b1f7a05c0917e5257a1af05e9ff683)
- OpenMP 201511 (a.k.a. OpenMP 4.5)
- NNPACK is enabled
- CPU capability usage: AVX2
- CUDA Runtime 11.1
- NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86
- CuDNN 8.0.5
- Magma 2.5.2
- Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.1, CUDNN_VERSION=8.0.5, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.8.1, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON,
TorchVision: 0.9.1+cu111
OpenCV: 4.5.3
MMCV: 1.3.8
MMCV Compiler: GCC 7.5
MMCV CUDA Compiler: 11.1
MMPose: 0.15.0+51b4b45
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmpose/apis/train.py`
Content:
```
1 # Copyright (c) OpenMMLab. All rights reserved.
2 import warnings
3
4 import torch
5 from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
6 from mmcv.runner import DistSamplerSeedHook, EpochBasedRunner, OptimizerHook
7
8 from mmpose.core import DistEvalHook, EvalHook, build_optimizers
9 from mmpose.core.distributed_wrapper import DistributedDataParallelWrapper
10 from mmpose.datasets import build_dataloader, build_dataset
11 from mmpose.utils import get_root_logger
12
13 try:
14 from mmcv.runner import Fp16OptimizerHook
15 except ImportError:
16 warnings.warn(
17 'Fp16OptimizerHook from mmpose will be deprecated from '
18 'v0.15.0. Please install mmcv>=1.1.4', DeprecationWarning)
19 from mmpose.core import Fp16OptimizerHook
20
21
22 def train_model(model,
23 dataset,
24 cfg,
25 distributed=False,
26 validate=False,
27 timestamp=None,
28 meta=None):
29 """Train model entry function.
30
31 Args:
32 model (nn.Module): The model to be trained.
33 dataset (Dataset): Train dataset.
34 cfg (dict): The config dict for training.
35 distributed (bool): Whether to use distributed training.
36 Default: False.
37 validate (bool): Whether to do evaluation. Default: False.
38 timestamp (str | None): Local time for runner. Default: None.
39 meta (dict | None): Meta dict to record some important information.
40 Default: None
41 """
42 logger = get_root_logger(cfg.log_level)
43
44 # prepare data loaders
45 dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
46 dataloader_setting = dict(
47 samples_per_gpu=cfg.data.get('samples_per_gpu', {}),
48 workers_per_gpu=cfg.data.get('workers_per_gpu', {}),
49 # cfg.gpus will be ignored if distributed
50 num_gpus=len(cfg.gpu_ids),
51 dist=distributed,
52 seed=cfg.seed)
53 dataloader_setting = dict(dataloader_setting,
54 **cfg.data.get('train_dataloader', {}))
55
56 data_loaders = [
57 build_dataloader(ds, **dataloader_setting) for ds in dataset
58 ]
59
60 # determine wether use adversarial training precess or not
61 use_adverserial_train = cfg.get('use_adversarial_train', False)
62
63 # put model on gpus
64 if distributed:
65 find_unused_parameters = cfg.get('find_unused_parameters', True)
66 # Sets the `find_unused_parameters` parameter in
67 # torch.nn.parallel.DistributedDataParallel
68
69 if use_adverserial_train:
70 # Use DistributedDataParallelWrapper for adversarial training
71 model = DistributedDataParallelWrapper(
72 model,
73 device_ids=[torch.cuda.current_device()],
74 broadcast_buffers=False,
75 find_unused_parameters=find_unused_parameters)
76 else:
77 model = MMDistributedDataParallel(
78 model.cuda(),
79 device_ids=[torch.cuda.current_device()],
80 broadcast_buffers=False,
81 find_unused_parameters=find_unused_parameters)
82 else:
83 model = MMDataParallel(
84 model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
85
86 # build runner
87 optimizer = build_optimizers(model, cfg.optimizer)
88
89 runner = EpochBasedRunner(
90 model,
91 optimizer=optimizer,
92 work_dir=cfg.work_dir,
93 logger=logger,
94 meta=meta)
95 # an ugly workaround to make .log and .log.json filenames the same
96 runner.timestamp = timestamp
97
98 if use_adverserial_train:
99 # The optimizer step process is included in the train_step function
100 # of the model, so the runner should NOT include optimizer hook.
101 optimizer_config = None
102 else:
103 # fp16 setting
104 fp16_cfg = cfg.get('fp16', None)
105 if fp16_cfg is not None:
106 optimizer_config = Fp16OptimizerHook(
107 **cfg.optimizer_config, **fp16_cfg, distributed=distributed)
108 elif distributed and 'type' not in cfg.optimizer_config:
109 optimizer_config = OptimizerHook(**cfg.optimizer_config)
110 else:
111 optimizer_config = cfg.optimizer_config
112
113 # register hooks
114 runner.register_training_hooks(cfg.lr_config, optimizer_config,
115 cfg.checkpoint_config, cfg.log_config,
116 cfg.get('momentum_config', None))
117 if distributed:
118 runner.register_hook(DistSamplerSeedHook())
119
120 # register eval hooks
121 if validate:
122 eval_cfg = cfg.get('evaluation', {})
123 val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
124 dataloader_setting = dict(
125 samples_per_gpu=1,
126 workers_per_gpu=cfg.data.get('workers_per_gpu', {}),
127 # cfg.gpus will be ignored if distributed
128 num_gpus=len(cfg.gpu_ids),
129 dist=distributed,
130 drop_last=False,
131 shuffle=False)
132 dataloader_setting = dict(dataloader_setting,
133 **cfg.data.get('val_dataloader', {}))
134 val_dataloader = build_dataloader(val_dataset, **dataloader_setting)
135 eval_hook = DistEvalHook if distributed else EvalHook
136 runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
137
138 if cfg.resume_from:
139 runner.resume(cfg.resume_from)
140 elif cfg.load_from:
141 runner.load_checkpoint(cfg.load_from)
142 runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mmpose/apis/train.py b/mmpose/apis/train.py
--- a/mmpose/apis/train.py
+++ b/mmpose/apis/train.py
@@ -43,19 +43,33 @@
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
- dataloader_setting = dict(
- samples_per_gpu=cfg.data.get('samples_per_gpu', {}),
- workers_per_gpu=cfg.data.get('workers_per_gpu', {}),
- # cfg.gpus will be ignored if distributed
- num_gpus=len(cfg.gpu_ids),
- dist=distributed,
- seed=cfg.seed)
- dataloader_setting = dict(dataloader_setting,
- **cfg.data.get('train_dataloader', {}))
-
- data_loaders = [
- build_dataloader(ds, **dataloader_setting) for ds in dataset
- ]
+ # step 1: give default values and override (if exist) from cfg.data
+ loader_cfg = {
+ **dict(
+ seed=cfg.get('seed'),
+ drop_last=False,
+ dist=distributed,
+ num_gpus=len(cfg.gpu_ids)),
+ **({} if torch.__version__ != 'parrots' else dict(
+ prefetch_num=2,
+ pin_memory=False,
+ )),
+ **dict((k, cfg.data[k]) for k in [
+ 'samples_per_gpu',
+ 'workers_per_gpu',
+ 'shuffle',
+ 'seed',
+ 'drop_last',
+ 'prefetch_num',
+ 'pin_memory',
+ 'persistent_workers',
+ ] if k in cfg.data)
+ }
+
+ # step 2: cfg.data.train_dataloader has highest priority
+ train_loader_cfg = dict(loader_cfg, **cfg.data.get('train_dataloader', {}))
+
+ data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]
# determine wether use adversarial training precess or not
use_adverserial_train = cfg.get('use_adversarial_train', False)
|
{"golden_diff": "diff --git a/mmpose/apis/train.py b/mmpose/apis/train.py\n--- a/mmpose/apis/train.py\n+++ b/mmpose/apis/train.py\n@@ -43,19 +43,33 @@\n \n # prepare data loaders\n dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]\n- dataloader_setting = dict(\n- samples_per_gpu=cfg.data.get('samples_per_gpu', {}),\n- workers_per_gpu=cfg.data.get('workers_per_gpu', {}),\n- # cfg.gpus will be ignored if distributed\n- num_gpus=len(cfg.gpu_ids),\n- dist=distributed,\n- seed=cfg.seed)\n- dataloader_setting = dict(dataloader_setting,\n- **cfg.data.get('train_dataloader', {}))\n-\n- data_loaders = [\n- build_dataloader(ds, **dataloader_setting) for ds in dataset\n- ]\n+ # step 1: give default values and override (if exist) from cfg.data\n+ loader_cfg = {\n+ **dict(\n+ seed=cfg.get('seed'),\n+ drop_last=False,\n+ dist=distributed,\n+ num_gpus=len(cfg.gpu_ids)),\n+ **({} if torch.__version__ != 'parrots' else dict(\n+ prefetch_num=2,\n+ pin_memory=False,\n+ )),\n+ **dict((k, cfg.data[k]) for k in [\n+ 'samples_per_gpu',\n+ 'workers_per_gpu',\n+ 'shuffle',\n+ 'seed',\n+ 'drop_last',\n+ 'prefetch_num',\n+ 'pin_memory',\n+ 'persistent_workers',\n+ ] if k in cfg.data)\n+ }\n+\n+ # step 2: cfg.data.train_dataloader has highest priority\n+ train_loader_cfg = dict(loader_cfg, **cfg.data.get('train_dataloader', {}))\n+\n+ data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]\n \n # determine wether use adversarial training precess or not\n use_adverserial_train = cfg.get('use_adversarial_train', False)\n", "issue": "a dataset bug causing topdown training very slow, wasting 3 min every epoch\ni found a dataset bug, i test it on several server(including 8 a100 with 96 core cpu), it all happened. for every epoch, this bug cause about 3min time wasting. i jsut can locat the bug, but i don't known why it happen. it seems only happen when distribution launching.\r\n\r\nbug loaction: when you lauch a topdown method, eg, topdown_heatmap/coco/res50_coco_256x192.py, go to /mmcv/runner/epoch_based_runner.py, about line 48. 
there is such func\r\n\r\n self.call_hook('before_train_epoch')\r\n time.sleep(2) # Prevent possible deadlock during epoch transition\r\n for i, data_batch in enumerate(self.data_loader):\r\n self._inner_iter = i\r\n\r\nat the every epoch begining, the ( for i, data_batch in enumerate(self.data_loader): ) takes about 3min, it make the training very slow.\r\n\r\nyou can modify the ori code to the code below to reproduce this issue, this only happen at very epoch begining.\r\n\r\n self.call_hook('before_train_epoch')\r\n time.sleep(2) # Prevent possible deadlock during epoch transition\r\n print('before_train_epoch, time:{}'.format(time.time()-start_time))\r\n start_time = time.time()\r\n for i, data_batch in enumerate(self.data_loader):\r\n self._inner_iter = i\r\n print('before_train_iter_load_data, time:{}'.format(time.time()-start_time))\r\n\r\nhere is my sys information\r\nPython: 3.7.10 | packaged by conda-forge | (default, Feb 19 2021, 16:07:37) [GCC 9.3.0] \r\nCUDA available: True GPU 0,1,2,3,4,5,6,7: A100-SXM4-40GB \r\nCUDA_HOME: /usr/local/cuda-11.1 \r\nNVCC: Build cuda_11.1.TC455_06.29190527_0 GCC: gcc (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0 \r\nPyTorch: 1.8.1+cu111 \r\nPyTorch compiling details: PyTorch built with: \r\n - GCC 7.3 \r\n - C++ Version: 201402 \r\n - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\r\n - Intel(R) MKL-DNN v1.7.0 (Git Hash 7aed236906b1f7a05c0917e5257a1af05e9ff683)\r\n - OpenMP 201511 (a.k.a. OpenMP 4.5)\r\n - NNPACK is enabled \r\n - CPU capability usage: AVX2 \r\n - CUDA Runtime 11.1\r\n - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-\r\ngencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\r\n - CuDNN 8.0.5 \r\n - Magma 2.5.2 \r\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.1, CUDNN_VERSION=8.0.5, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated\r\n-fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -O2 -fPIC -Wno-narrowing -Wall -We\r\nxtra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable\r\n-Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-ps\r\nabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -f\r\nno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.8.1, USE_CUDA=\r\nON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON,\r\n \r\nTorchVision: 0.9.1+cu111 \r\nOpenCV: 4.5.3 \r\nMMCV: 1.3.8 \r\nMMCV Compiler: GCC 7.5\r\nMMCV CUDA Compiler: 11.1\r\nMMPose: 0.15.0+51b4b45\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. 
All rights reserved.\nimport warnings\n\nimport torch\nfrom mmcv.parallel import MMDataParallel, MMDistributedDataParallel\nfrom mmcv.runner import DistSamplerSeedHook, EpochBasedRunner, OptimizerHook\n\nfrom mmpose.core import DistEvalHook, EvalHook, build_optimizers\nfrom mmpose.core.distributed_wrapper import DistributedDataParallelWrapper\nfrom mmpose.datasets import build_dataloader, build_dataset\nfrom mmpose.utils import get_root_logger\n\ntry:\n from mmcv.runner import Fp16OptimizerHook\nexcept ImportError:\n warnings.warn(\n 'Fp16OptimizerHook from mmpose will be deprecated from '\n 'v0.15.0. Please install mmcv>=1.1.4', DeprecationWarning)\n from mmpose.core import Fp16OptimizerHook\n\n\ndef train_model(model,\n dataset,\n cfg,\n distributed=False,\n validate=False,\n timestamp=None,\n meta=None):\n \"\"\"Train model entry function.\n\n Args:\n model (nn.Module): The model to be trained.\n dataset (Dataset): Train dataset.\n cfg (dict): The config dict for training.\n distributed (bool): Whether to use distributed training.\n Default: False.\n validate (bool): Whether to do evaluation. Default: False.\n timestamp (str | None): Local time for runner. Default: None.\n meta (dict | None): Meta dict to record some important information.\n Default: None\n \"\"\"\n logger = get_root_logger(cfg.log_level)\n\n # prepare data loaders\n dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]\n dataloader_setting = dict(\n samples_per_gpu=cfg.data.get('samples_per_gpu', {}),\n workers_per_gpu=cfg.data.get('workers_per_gpu', {}),\n # cfg.gpus will be ignored if distributed\n num_gpus=len(cfg.gpu_ids),\n dist=distributed,\n seed=cfg.seed)\n dataloader_setting = dict(dataloader_setting,\n **cfg.data.get('train_dataloader', {}))\n\n data_loaders = [\n build_dataloader(ds, **dataloader_setting) for ds in dataset\n ]\n\n # determine wether use adversarial training precess or not\n use_adverserial_train = cfg.get('use_adversarial_train', False)\n\n # put model on gpus\n if distributed:\n find_unused_parameters = cfg.get('find_unused_parameters', True)\n # Sets the `find_unused_parameters` parameter in\n # torch.nn.parallel.DistributedDataParallel\n\n if use_adverserial_train:\n # Use DistributedDataParallelWrapper for adversarial training\n model = DistributedDataParallelWrapper(\n model,\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False,\n find_unused_parameters=find_unused_parameters)\n else:\n model = MMDistributedDataParallel(\n model.cuda(),\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False,\n find_unused_parameters=find_unused_parameters)\n else:\n model = MMDataParallel(\n model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)\n\n # build runner\n optimizer = build_optimizers(model, cfg.optimizer)\n\n runner = EpochBasedRunner(\n model,\n optimizer=optimizer,\n work_dir=cfg.work_dir,\n logger=logger,\n meta=meta)\n # an ugly workaround to make .log and .log.json filenames the same\n runner.timestamp = timestamp\n\n if use_adverserial_train:\n # The optimizer step process is included in the train_step function\n # of the model, so the runner should NOT include optimizer hook.\n optimizer_config = None\n else:\n # fp16 setting\n fp16_cfg = cfg.get('fp16', None)\n if fp16_cfg is not None:\n optimizer_config = Fp16OptimizerHook(\n **cfg.optimizer_config, **fp16_cfg, distributed=distributed)\n elif distributed and 'type' not in cfg.optimizer_config:\n optimizer_config = OptimizerHook(**cfg.optimizer_config)\n else:\n optimizer_config = 
cfg.optimizer_config\n\n # register hooks\n runner.register_training_hooks(cfg.lr_config, optimizer_config,\n cfg.checkpoint_config, cfg.log_config,\n cfg.get('momentum_config', None))\n if distributed:\n runner.register_hook(DistSamplerSeedHook())\n\n # register eval hooks\n if validate:\n eval_cfg = cfg.get('evaluation', {})\n val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))\n dataloader_setting = dict(\n samples_per_gpu=1,\n workers_per_gpu=cfg.data.get('workers_per_gpu', {}),\n # cfg.gpus will be ignored if distributed\n num_gpus=len(cfg.gpu_ids),\n dist=distributed,\n drop_last=False,\n shuffle=False)\n dataloader_setting = dict(dataloader_setting,\n **cfg.data.get('val_dataloader', {}))\n val_dataloader = build_dataloader(val_dataset, **dataloader_setting)\n eval_hook = DistEvalHook if distributed else EvalHook\n runner.register_hook(eval_hook(val_dataloader, **eval_cfg))\n\n if cfg.resume_from:\n runner.resume(cfg.resume_from)\n elif cfg.load_from:\n runner.load_checkpoint(cfg.load_from)\n runner.run(data_loaders, cfg.workflow, cfg.total_epochs)\n", "path": "mmpose/apis/train.py"}], "after_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\nfrom mmcv.parallel import MMDataParallel, MMDistributedDataParallel\nfrom mmcv.runner import DistSamplerSeedHook, EpochBasedRunner, OptimizerHook\n\nfrom mmpose.core import DistEvalHook, EvalHook, build_optimizers\nfrom mmpose.core.distributed_wrapper import DistributedDataParallelWrapper\nfrom mmpose.datasets import build_dataloader, build_dataset\nfrom mmpose.utils import get_root_logger\n\ntry:\n from mmcv.runner import Fp16OptimizerHook\nexcept ImportError:\n warnings.warn(\n 'Fp16OptimizerHook from mmpose will be deprecated from '\n 'v0.15.0. Please install mmcv>=1.1.4', DeprecationWarning)\n from mmpose.core import Fp16OptimizerHook\n\n\ndef train_model(model,\n dataset,\n cfg,\n distributed=False,\n validate=False,\n timestamp=None,\n meta=None):\n \"\"\"Train model entry function.\n\n Args:\n model (nn.Module): The model to be trained.\n dataset (Dataset): Train dataset.\n cfg (dict): The config dict for training.\n distributed (bool): Whether to use distributed training.\n Default: False.\n validate (bool): Whether to do evaluation. Default: False.\n timestamp (str | None): Local time for runner. 
Default: None.\n meta (dict | None): Meta dict to record some important information.\n Default: None\n \"\"\"\n logger = get_root_logger(cfg.log_level)\n\n # prepare data loaders\n dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]\n # step 1: give default values and override (if exist) from cfg.data\n loader_cfg = {\n **dict(\n seed=cfg.get('seed'),\n drop_last=False,\n dist=distributed,\n num_gpus=len(cfg.gpu_ids)),\n **({} if torch.__version__ != 'parrots' else dict(\n prefetch_num=2,\n pin_memory=False,\n )),\n **dict((k, cfg.data[k]) for k in [\n 'samples_per_gpu',\n 'workers_per_gpu',\n 'shuffle',\n 'seed',\n 'drop_last',\n 'prefetch_num',\n 'pin_memory',\n 'persistent_workers',\n ] if k in cfg.data)\n }\n\n # step 2: cfg.data.train_dataloader has highest priority\n train_loader_cfg = dict(loader_cfg, **cfg.data.get('train_dataloader', {}))\n\n data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]\n\n # determine wether use adversarial training precess or not\n use_adverserial_train = cfg.get('use_adversarial_train', False)\n\n # put model on gpus\n if distributed:\n find_unused_parameters = cfg.get('find_unused_parameters', True)\n # Sets the `find_unused_parameters` parameter in\n # torch.nn.parallel.DistributedDataParallel\n\n if use_adverserial_train:\n # Use DistributedDataParallelWrapper for adversarial training\n model = DistributedDataParallelWrapper(\n model,\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False,\n find_unused_parameters=find_unused_parameters)\n else:\n model = MMDistributedDataParallel(\n model.cuda(),\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False,\n find_unused_parameters=find_unused_parameters)\n else:\n model = MMDataParallel(\n model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)\n\n # build runner\n optimizer = build_optimizers(model, cfg.optimizer)\n\n runner = EpochBasedRunner(\n model,\n optimizer=optimizer,\n work_dir=cfg.work_dir,\n logger=logger,\n meta=meta)\n # an ugly workaround to make .log and .log.json filenames the same\n runner.timestamp = timestamp\n\n if use_adverserial_train:\n # The optimizer step process is included in the train_step function\n # of the model, so the runner should NOT include optimizer hook.\n optimizer_config = None\n else:\n # fp16 setting\n fp16_cfg = cfg.get('fp16', None)\n if fp16_cfg is not None:\n optimizer_config = Fp16OptimizerHook(\n **cfg.optimizer_config, **fp16_cfg, distributed=distributed)\n elif distributed and 'type' not in cfg.optimizer_config:\n optimizer_config = OptimizerHook(**cfg.optimizer_config)\n else:\n optimizer_config = cfg.optimizer_config\n\n # register hooks\n runner.register_training_hooks(cfg.lr_config, optimizer_config,\n cfg.checkpoint_config, cfg.log_config,\n cfg.get('momentum_config', None))\n if distributed:\n runner.register_hook(DistSamplerSeedHook())\n\n # register eval hooks\n if validate:\n eval_cfg = cfg.get('evaluation', {})\n val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))\n dataloader_setting = dict(\n samples_per_gpu=1,\n workers_per_gpu=cfg.data.get('workers_per_gpu', {}),\n # cfg.gpus will be ignored if distributed\n num_gpus=len(cfg.gpu_ids),\n dist=distributed,\n drop_last=False,\n shuffle=False)\n dataloader_setting = dict(dataloader_setting,\n **cfg.data.get('val_dataloader', {}))\n val_dataloader = build_dataloader(val_dataset, **dataloader_setting)\n eval_hook = DistEvalHook if distributed else EvalHook\n runner.register_hook(eval_hook(val_dataloader, 
**eval_cfg))\n\n if cfg.resume_from:\n runner.resume(cfg.resume_from)\n elif cfg.load_from:\n runner.load_checkpoint(cfg.load_from)\n runner.run(data_loaders, cfg.workflow, cfg.total_epochs)\n", "path": "mmpose/apis/train.py"}]}
| 3,103 | 467 |
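The fix recorded above lets the loader config pass `persistent_workers` (and related options) straight through to the underlying DataLoader; the multi-minute stall reported at the start of every epoch is typically the cost of re-spawning worker processes. The sketch below is illustrative only — the toy `RandomDataset` and the concrete numbers are assumptions, not code from the repository — and shows the standard PyTorch flag involved.

```python
# Minimal, self-contained illustration (not mmpose code): persistent_workers=True
# keeps DataLoader worker processes alive between epochs, so the second epoch
# does not pay the worker start-up cost again.
import torch
from torch.utils.data import DataLoader, Dataset


class RandomDataset(Dataset):
    """Toy stand-in for the real training dataset."""

    def __len__(self):
        return 1024

    def __getitem__(self, idx):
        return torch.randn(3, 64, 64), idx % 10


if __name__ == "__main__":
    loader = DataLoader(
        RandomDataset(),
        batch_size=32,
        num_workers=4,
        persistent_workers=True,  # needs num_workers > 0; available since PyTorch 1.7
        pin_memory=True,
    )
    for epoch in range(2):
        for images, labels in loader:
            pass  # workers are reused on the second epoch instead of being re-spawned
```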
gh_patches_debug_17873
|
rasdani/github-patches
|
git_diff
|
PyGithub__PyGithub-1327
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
InputGitTreeElement should allow passing "null" for sha
Github's [Tree creation api](https://developer.github.com/v3/git/trees/#create-a-tree) allows us to pass `sha = null` to indicate that the specified blob needs to be deleted.
However, I don't have a way to pass this info to my `InputGitTreeElement`. I can either give it a str or a `github.GithubObject.NotSet`. This means I have no way of deleting files from a tree using PyGithub (I'd like to delete multiple files in a single commit so tree creation is the ideal choice for me).
The current design is to only pass the `sha` if it is actually set:
https://github.com/PyGithub/PyGithub/blob/540a085001/github/InputGitTreeElement.py#L81
I can understand that passing a `None` goes against the design. I think something like `github.GithubObject.Null` could be introduced to explicitly say that this field is `null`. It can be used everywhere the GH API accepts a null value.
Example
```python
new_tree = repo.create_git_tree(
[
InputGitTreeElement(
path="my/dir/my_file.txt", mode="100644", type="blob", sha=github.GithubObject.Null
),
],
base_tree=head_commit.tree
)
```
This will delete `my/dir/my_file.txt`
---
My current workaround is to directly hit the api to create tree (using requests, setting `sha=None`), get the tree sha & use it with pygithub for my remaining workflow (committing, etc).
Please let me know in case I misunderstood some aspect or if anything needs to be elaborated upon.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `github/InputGitTreeElement.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 ############################ Copyrights and license ############################
4 # #
5 # Copyright 2012 Vincent Jacques <[email protected]> #
6 # Copyright 2012 Zearin <[email protected]> #
7 # Copyright 2013 Vincent Jacques <[email protected]> #
8 # Copyright 2014 Vincent Jacques <[email protected]> #
9 # Copyright 2016 Peter Buckley <[email protected]> #
10 # Copyright 2018 Wan Liuyang <[email protected]> #
11 # Copyright 2018 sfdye <[email protected]> #
12 # #
13 # This file is part of PyGithub. #
14 # http://pygithub.readthedocs.io/ #
15 # #
16 # PyGithub is free software: you can redistribute it and/or modify it under #
17 # the terms of the GNU Lesser General Public License as published by the Free #
18 # Software Foundation, either version 3 of the License, or (at your option) #
19 # any later version. #
20 # #
21 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
22 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
23 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
24 # details. #
25 # #
26 # You should have received a copy of the GNU Lesser General Public License #
27 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
28 # #
29 ################################################################################
30
31 from __future__ import absolute_import
32
33 import six
34
35 import github.GithubObject
36
37
38 class InputGitTreeElement(object):
39 """
40 This class represents InputGitTreeElements
41 """
42
43 def __init__(
44 self,
45 path,
46 mode,
47 type,
48 content=github.GithubObject.NotSet,
49 sha=github.GithubObject.NotSet,
50 ):
51 """
52 :param path: string
53 :param mode: string
54 :param type: string
55 :param content: string
56 :param sha: string
57 """
58
59 assert isinstance(path, (str, six.text_type)), path
60 assert isinstance(mode, (str, six.text_type)), mode
61 assert isinstance(type, (str, six.text_type)), type
62 assert content is github.GithubObject.NotSet or isinstance(
63 content, (str, six.text_type)
64 ), content
65 assert sha is github.GithubObject.NotSet or isinstance(
66 sha, (str, six.text_type)
67 ), sha
68 self.__path = path
69 self.__mode = mode
70 self.__type = type
71 self.__content = content
72 self.__sha = sha
73
74 @property
75 def _identity(self):
76 identity = {
77 "path": self.__path,
78 "mode": self.__mode,
79 "type": self.__type,
80 }
81 if self.__sha is not github.GithubObject.NotSet:
82 identity["sha"] = self.__sha
83 if self.__content is not github.GithubObject.NotSet:
84 identity["content"] = self.__content
85 return identity
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/github/InputGitTreeElement.py b/github/InputGitTreeElement.py
--- a/github/InputGitTreeElement.py
+++ b/github/InputGitTreeElement.py
@@ -53,7 +53,7 @@
:param mode: string
:param type: string
:param content: string
- :param sha: string
+ :param sha: string or None
"""
assert isinstance(path, (str, six.text_type)), path
@@ -62,8 +62,10 @@
assert content is github.GithubObject.NotSet or isinstance(
content, (str, six.text_type)
), content
- assert sha is github.GithubObject.NotSet or isinstance(
- sha, (str, six.text_type)
+ assert (
+ sha is github.GithubObject.NotSet
+ or sha is None
+ or isinstance(sha, (str, six.text_type))
), sha
self.__path = path
self.__mode = mode
|
{"golden_diff": "diff --git a/github/InputGitTreeElement.py b/github/InputGitTreeElement.py\n--- a/github/InputGitTreeElement.py\n+++ b/github/InputGitTreeElement.py\n@@ -53,7 +53,7 @@\n :param mode: string\n :param type: string\n :param content: string\n- :param sha: string\n+ :param sha: string or None\n \"\"\"\n \n assert isinstance(path, (str, six.text_type)), path\n@@ -62,8 +62,10 @@\n assert content is github.GithubObject.NotSet or isinstance(\n content, (str, six.text_type)\n ), content\n- assert sha is github.GithubObject.NotSet or isinstance(\n- sha, (str, six.text_type)\n+ assert (\n+ sha is github.GithubObject.NotSet\n+ or sha is None\n+ or isinstance(sha, (str, six.text_type))\n ), sha\n self.__path = path\n self.__mode = mode\n", "issue": "InputGitTreeElement should allow passing \"null\" for sha\nGithub's [Tree creation api](https://developer.github.com/v3/git/trees/#create-a-tree) allows us to pass `sha = null` to indicate that the specified blob needs to be deleted.\r\n\r\nHowever, I don't have a way to pass this info to my `InputGitTreeElement`. I can either give it a str or a `github.GithubObject.NotSet`. This means I have no way of deleting files from a tree using PyGithub (I'd like to delete multiple files in a single commit so tree creation is the ideal choice for me).\r\n\r\nThe current design is to only pass the `sha` if it is actually set:\r\nhttps://github.com/PyGithub/PyGithub/blob/540a085001/github/InputGitTreeElement.py#L81\r\n\r\nI can understand that passing a `None` goes against the design. I think something like `github.GithubObject.Null` could be introduced to explicitly say that this field is `null`. It can be used everywhere the GH API accepts a null value.\r\n\r\nExample\r\n```python\r\nnew_tree = repo.create_git_tree(\r\n [\r\n InputGitTreeElement(\r\n path=\"my/dir/my_file.txt\", mode=\"100644\", type=\"blob\", sha=github.GithubObject.Null\r\n ),\r\n ],\r\n base_tree=head_commit.tree\r\n)\r\n```\r\nThis will delete `my/dir/my_file.txt`\r\n\r\n---\r\n\r\nMy current workaround is to directly hit the api to create tree (using requests, setting `sha=None`), get the tree sha & use it with pygithub for my remaining workflow (committing, etc).\r\n\r\nPlease let me know in case I misunderstood some aspect or if anything needs to be elaborated upon.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. 
If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\nfrom __future__ import absolute_import\n\nimport six\n\nimport github.GithubObject\n\n\nclass InputGitTreeElement(object):\n \"\"\"\n This class represents InputGitTreeElements\n \"\"\"\n\n def __init__(\n self,\n path,\n mode,\n type,\n content=github.GithubObject.NotSet,\n sha=github.GithubObject.NotSet,\n ):\n \"\"\"\n :param path: string\n :param mode: string\n :param type: string\n :param content: string\n :param sha: string\n \"\"\"\n\n assert isinstance(path, (str, six.text_type)), path\n assert isinstance(mode, (str, six.text_type)), mode\n assert isinstance(type, (str, six.text_type)), type\n assert content is github.GithubObject.NotSet or isinstance(\n content, (str, six.text_type)\n ), content\n assert sha is github.GithubObject.NotSet or isinstance(\n sha, (str, six.text_type)\n ), sha\n self.__path = path\n self.__mode = mode\n self.__type = type\n self.__content = content\n self.__sha = sha\n\n @property\n def _identity(self):\n identity = {\n \"path\": self.__path,\n \"mode\": self.__mode,\n \"type\": self.__type,\n }\n if self.__sha is not github.GithubObject.NotSet:\n identity[\"sha\"] = self.__sha\n if self.__content is not github.GithubObject.NotSet:\n identity[\"content\"] = self.__content\n return identity\n", "path": "github/InputGitTreeElement.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
#\n# #\n################################################################################\n\nfrom __future__ import absolute_import\n\nimport six\n\nimport github.GithubObject\n\n\nclass InputGitTreeElement(object):\n \"\"\"\n This class represents InputGitTreeElements\n \"\"\"\n\n def __init__(\n self,\n path,\n mode,\n type,\n content=github.GithubObject.NotSet,\n sha=github.GithubObject.NotSet,\n ):\n \"\"\"\n :param path: string\n :param mode: string\n :param type: string\n :param content: string\n :param sha: string or None\n \"\"\"\n\n assert isinstance(path, (str, six.text_type)), path\n assert isinstance(mode, (str, six.text_type)), mode\n assert isinstance(type, (str, six.text_type)), type\n assert content is github.GithubObject.NotSet or isinstance(\n content, (str, six.text_type)\n ), content\n assert (\n sha is github.GithubObject.NotSet\n or sha is None\n or isinstance(sha, (str, six.text_type))\n ), sha\n self.__path = path\n self.__mode = mode\n self.__type = type\n self.__content = content\n self.__sha = sha\n\n @property\n def _identity(self):\n identity = {\n \"path\": self.__path,\n \"mode\": self.__mode,\n \"type\": self.__type,\n }\n if self.__sha is not github.GithubObject.NotSet:\n identity[\"sha\"] = self.__sha\n if self.__content is not github.GithubObject.NotSet:\n identity[\"content\"] = self.__content\n return identity\n", "path": "github/InputGitTreeElement.py"}]}
| 1,505 | 223 |
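In the record above, the accepted change simply lets `InputGitTreeElement` carry `sha=None`; that is enough because Python's `None` serializes to JSON `null`, which the Git "create a tree" endpoint treats as "delete this path". The stdlib-only sketch below shows the payload shape — the `base_tree` value is a placeholder, and the dict mirrors the `_identity` structure rather than calling PyGithub itself.

```python
# Sketch of the request body the patched code can now produce; json.dumps turns
# Python None into JSON null, which the tree-creation API reads as a deletion.
import json

tree_payload = {
    "base_tree": "abc123",  # placeholder sha of the commit's existing tree
    "tree": [
        {
            "path": "my/dir/my_file.txt",
            "mode": "100644",
            "type": "blob",
            "sha": None,  # becomes null -> the file is removed in the new tree
        }
    ],
}

print(json.dumps(tree_payload, indent=2))
# The `"sha": null` line in the output is exactly what the API expects.
```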
gh_patches_debug_24648
|
rasdani/github-patches
|
git_diff
|
pypa__pip-8079
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
New resolver cannot install distributions that only have pre-releases
**Environment**
* pip version: master, today
* Python version: 3
* OS: linux
**Description**
I want to install a distribution that only has pre-releases. The legacy resolver does support this. The new one does not.
Note: using `--pre` does not seem to influence the result. The legacy resolver could install such distributions without using `--pre`.
**Expected behavior**
Installation should succeed.
**How to Reproduce**
```console
$ pip install --no-deps odoo13-addon-date-range --unstable-feature=resolver
ERROR: Exception:
Traceback (most recent call last):
File "/home/me/pip/src/pip/_internal/cli/base_command.py", line 199, in _main
status = self.run(options, args)
File "/home/me/pip/src/pip/_internal/cli/req_command.py", line 185, in wrapper
return func(self, options, args)
File "/home/me/pip/src/pip/_internal/commands/install.py", line 333, in run
reqs, check_supported_wheels=not options.target_dir
File "/home/me/pip/src/pip/_internal/resolution/resolvelib/resolver.py", line 80, in resolve
self._result = resolver.resolve(requirements)
File "/home/me/pip/src/pip/_vendor/resolvelib/resolvers.py", line 413, in resolve
state = resolution.resolve(requirements, max_rounds=max_rounds)
File "/home/me/pip/src/pip/_vendor/resolvelib/resolvers.py", line 310, in resolve
failure_causes = self._attempt_to_pin_criterion(name, criterion)
File "/home/me/pip/src/pip/_vendor/resolvelib/resolvers.py", line 240, in _attempt_to_pin_criterion
raise InconsistentCandidate(candidate, criterion)
pip._vendor.resolvelib.resolvers.InconsistentCandidate: Provided candidate LinkCandidate('https://files.pythonhosted.org/packages/1f/0b/945335a37082b6b013cc1331f49e3f5b6a18cdd0b693475e6ca9e9a7df6e/odoo13_addon_date_range-13.0.1.0.1.dev8-py3-none-any.whl#sha256=3883bbe87db8d5db4364e8a42e86546e19e8e4f123d98c4e9454587dfa9401df (from https://pypi.org/simple/odoo13-addon-date-range/) (requires-python:>=3.5)') does not satisfy SpecifierRequirement('odoo13-addon-date-range')
```
Note I used `--no-deps` because a dependency is not on pypi, but that has no influence on the result.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/resolution/resolvelib/requirements.py`
Content:
```
1 from pip._vendor.packaging.utils import canonicalize_name
2
3 from pip._internal.utils.typing import MYPY_CHECK_RUNNING
4
5 from .base import Requirement, format_name
6
7 if MYPY_CHECK_RUNNING:
8 from typing import Sequence
9
10 from pip._vendor.packaging.specifiers import SpecifierSet
11
12 from pip._internal.req.req_install import InstallRequirement
13
14 from .base import Candidate
15 from .factory import Factory
16
17
18 class ExplicitRequirement(Requirement):
19 def __init__(self, candidate):
20 # type: (Candidate) -> None
21 self.candidate = candidate
22
23 def __repr__(self):
24 # type: () -> str
25 return "{class_name}({candidate!r})".format(
26 class_name=self.__class__.__name__,
27 candidate=self.candidate,
28 )
29
30 @property
31 def name(self):
32 # type: () -> str
33 # No need to canonicalise - the candidate did this
34 return self.candidate.name
35
36 def find_matches(self):
37 # type: () -> Sequence[Candidate]
38 return [self.candidate]
39
40 def is_satisfied_by(self, candidate):
41 # type: (Candidate) -> bool
42 return candidate == self.candidate
43
44
45 class SpecifierRequirement(Requirement):
46 def __init__(self, ireq, factory):
47 # type: (InstallRequirement, Factory) -> None
48 assert ireq.link is None, "This is a link, not a specifier"
49 self._ireq = ireq
50 self._factory = factory
51 self.extras = ireq.req.extras
52
53 def __str__(self):
54 # type: () -> str
55 return str(self._ireq.req)
56
57 def __repr__(self):
58 # type: () -> str
59 return "{class_name}({requirement!r})".format(
60 class_name=self.__class__.__name__,
61 requirement=str(self._ireq.req),
62 )
63
64 @property
65 def name(self):
66 # type: () -> str
67 canonical_name = canonicalize_name(self._ireq.req.name)
68 return format_name(canonical_name, self.extras)
69
70 def find_matches(self):
71 # type: () -> Sequence[Candidate]
72 it = self._factory.iter_found_candidates(self._ireq, self.extras)
73 return list(it)
74
75 def is_satisfied_by(self, candidate):
76 # type: (Candidate) -> bool
77 assert candidate.name == self.name, \
78 "Internal issue: Candidate is not for this requirement " \
79 " {} vs {}".format(candidate.name, self.name)
80 return candidate.version in self._ireq.req.specifier
81
82
83 class RequiresPythonRequirement(Requirement):
84 """A requirement representing Requires-Python metadata.
85 """
86 def __init__(self, specifier, match):
87 # type: (SpecifierSet, Candidate) -> None
88 self.specifier = specifier
89 self._candidate = match
90
91 def __repr__(self):
92 # type: () -> str
93 return "{class_name}({specifier!r})".format(
94 class_name=self.__class__.__name__,
95 specifier=str(self.specifier),
96 )
97
98 @property
99 def name(self):
100 # type: () -> str
101 return self._candidate.name
102
103 def find_matches(self):
104 # type: () -> Sequence[Candidate]
105 if self._candidate.version in self.specifier:
106 return [self._candidate]
107 return []
108
109 def is_satisfied_by(self, candidate):
110 # type: (Candidate) -> bool
111 assert candidate.name == self._candidate.name, "Not Python candidate"
112 return candidate.version in self.specifier
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pip/_internal/resolution/resolvelib/requirements.py b/src/pip/_internal/resolution/resolvelib/requirements.py
--- a/src/pip/_internal/resolution/resolvelib/requirements.py
+++ b/src/pip/_internal/resolution/resolvelib/requirements.py
@@ -77,7 +77,11 @@
assert candidate.name == self.name, \
"Internal issue: Candidate is not for this requirement " \
" {} vs {}".format(candidate.name, self.name)
- return candidate.version in self._ireq.req.specifier
+ # We can safely always allow prereleases here since PackageFinder
+ # already implements the prerelease logic, and would have filtered out
+ # prerelease candidates if the user does not expect them.
+ spec = self._ireq.req.specifier
+ return spec.contains(candidate.version, prereleases=True)
class RequiresPythonRequirement(Requirement):
@@ -109,4 +113,7 @@
def is_satisfied_by(self, candidate):
# type: (Candidate) -> bool
assert candidate.name == self._candidate.name, "Not Python candidate"
- return candidate.version in self.specifier
+ # We can safely always allow prereleases here since PackageFinder
+ # already implements the prerelease logic, and would have filtered out
+ # prerelease candidates if the user does not expect them.
+ return self.specifier.contains(candidate.version, prereleases=True)
|
{"golden_diff": "diff --git a/src/pip/_internal/resolution/resolvelib/requirements.py b/src/pip/_internal/resolution/resolvelib/requirements.py\n--- a/src/pip/_internal/resolution/resolvelib/requirements.py\n+++ b/src/pip/_internal/resolution/resolvelib/requirements.py\n@@ -77,7 +77,11 @@\n assert candidate.name == self.name, \\\n \"Internal issue: Candidate is not for this requirement \" \\\n \" {} vs {}\".format(candidate.name, self.name)\n- return candidate.version in self._ireq.req.specifier\n+ # We can safely always allow prereleases here since PackageFinder\n+ # already implements the prerelease logic, and would have filtered out\n+ # prerelease candidates if the user does not expect them.\n+ spec = self._ireq.req.specifier\n+ return spec.contains(candidate.version, prereleases=True)\n \n \n class RequiresPythonRequirement(Requirement):\n@@ -109,4 +113,7 @@\n def is_satisfied_by(self, candidate):\n # type: (Candidate) -> bool\n assert candidate.name == self._candidate.name, \"Not Python candidate\"\n- return candidate.version in self.specifier\n+ # We can safely always allow prereleases here since PackageFinder\n+ # already implements the prerelease logic, and would have filtered out\n+ # prerelease candidates if the user does not expect them.\n+ return self.specifier.contains(candidate.version, prereleases=True)\n", "issue": "New resolver cannot installs distributions that only have pre releases\n**Environment**\r\n\r\n* pip version: master, today\r\n* Python version: 3\r\n* OS: linux\r\n\r\n**Description**\r\n\r\nI want to install a distribution that only has pre-releases. The legacy resolver does support this. The new one does not. \r\n\r\nNote: using `--pre` does not seem to influence the result. The legacy resolver could install such distributions without using `--pre`.\r\n\r\n**Expected behavior**\r\n\r\nInstallation should succeed.\r\n\r\n**How to Reproduce**\r\n\r\n```console\r\n$ pip install --no-deps odoo13-addon-date-range --unstable-feature=resolver\r\nERROR: Exception:\r\nTraceback (most recent call last):\r\n File \"/home/me/pip/src/pip/_internal/cli/base_command.py\", line 199, in _main\r\n status = self.run(options, args)\r\n File \"/home/me/pip/src/pip/_internal/cli/req_command.py\", line 185, in wrapper\r\n return func(self, options, args)\r\n File \"/home/me/pip/src/pip/_internal/commands/install.py\", line 333, in run\r\n reqs, check_supported_wheels=not options.target_dir\r\n File \"/home/me/pip/src/pip/_internal/resolution/resolvelib/resolver.py\", line 80, in resolve\r\n self._result = resolver.resolve(requirements)\r\n File \"/home/me/pip/src/pip/_vendor/resolvelib/resolvers.py\", line 413, in resolve\r\n state = resolution.resolve(requirements, max_rounds=max_rounds)\r\n File \"/home/me/pip/src/pip/_vendor/resolvelib/resolvers.py\", line 310, in resolve\r\n failure_causes = self._attempt_to_pin_criterion(name, criterion)\r\n File \"/home/me/pip/src/pip/_vendor/resolvelib/resolvers.py\", line 240, in _attempt_to_pin_criterion\r\n raise InconsistentCandidate(candidate, criterion)\r\npip._vendor.resolvelib.resolvers.InconsistentCandidate: Provided candidate LinkCandidate('https://files.pythonhosted.org/packages/1f/0b/945335a37082b6b013cc1331f49e3f5b6a18cdd0b693475e6ca9e9a7df6e/odoo13_addon_date_range-13.0.1.0.1.dev8-py3-none-any.whl#sha256=3883bbe87db8d5db4364e8a42e86546e19e8e4f123d98c4e9454587dfa9401df (from https://pypi.org/simple/odoo13-addon-date-range/) (requires-python:>=3.5)') does not satisfy 
SpecifierRequirement('odoo13-addon-date-range')\r\n```\r\n\r\nNote I used `--no-deps` because a dependency is not on pypi, but that has no influence on the result.\n", "before_files": [{"content": "from pip._vendor.packaging.utils import canonicalize_name\n\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nfrom .base import Requirement, format_name\n\nif MYPY_CHECK_RUNNING:\n from typing import Sequence\n\n from pip._vendor.packaging.specifiers import SpecifierSet\n\n from pip._internal.req.req_install import InstallRequirement\n\n from .base import Candidate\n from .factory import Factory\n\n\nclass ExplicitRequirement(Requirement):\n def __init__(self, candidate):\n # type: (Candidate) -> None\n self.candidate = candidate\n\n def __repr__(self):\n # type: () -> str\n return \"{class_name}({candidate!r})\".format(\n class_name=self.__class__.__name__,\n candidate=self.candidate,\n )\n\n @property\n def name(self):\n # type: () -> str\n # No need to canonicalise - the candidate did this\n return self.candidate.name\n\n def find_matches(self):\n # type: () -> Sequence[Candidate]\n return [self.candidate]\n\n def is_satisfied_by(self, candidate):\n # type: (Candidate) -> bool\n return candidate == self.candidate\n\n\nclass SpecifierRequirement(Requirement):\n def __init__(self, ireq, factory):\n # type: (InstallRequirement, Factory) -> None\n assert ireq.link is None, \"This is a link, not a specifier\"\n self._ireq = ireq\n self._factory = factory\n self.extras = ireq.req.extras\n\n def __str__(self):\n # type: () -> str\n return str(self._ireq.req)\n\n def __repr__(self):\n # type: () -> str\n return \"{class_name}({requirement!r})\".format(\n class_name=self.__class__.__name__,\n requirement=str(self._ireq.req),\n )\n\n @property\n def name(self):\n # type: () -> str\n canonical_name = canonicalize_name(self._ireq.req.name)\n return format_name(canonical_name, self.extras)\n\n def find_matches(self):\n # type: () -> Sequence[Candidate]\n it = self._factory.iter_found_candidates(self._ireq, self.extras)\n return list(it)\n\n def is_satisfied_by(self, candidate):\n # type: (Candidate) -> bool\n assert candidate.name == self.name, \\\n \"Internal issue: Candidate is not for this requirement \" \\\n \" {} vs {}\".format(candidate.name, self.name)\n return candidate.version in self._ireq.req.specifier\n\n\nclass RequiresPythonRequirement(Requirement):\n \"\"\"A requirement representing Requires-Python metadata.\n \"\"\"\n def __init__(self, specifier, match):\n # type: (SpecifierSet, Candidate) -> None\n self.specifier = specifier\n self._candidate = match\n\n def __repr__(self):\n # type: () -> str\n return \"{class_name}({specifier!r})\".format(\n class_name=self.__class__.__name__,\n specifier=str(self.specifier),\n )\n\n @property\n def name(self):\n # type: () -> str\n return self._candidate.name\n\n def find_matches(self):\n # type: () -> Sequence[Candidate]\n if self._candidate.version in self.specifier:\n return [self._candidate]\n return []\n\n def is_satisfied_by(self, candidate):\n # type: (Candidate) -> bool\n assert candidate.name == self._candidate.name, \"Not Python candidate\"\n return candidate.version in self.specifier\n", "path": "src/pip/_internal/resolution/resolvelib/requirements.py"}], "after_files": [{"content": "from pip._vendor.packaging.utils import canonicalize_name\n\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nfrom .base import Requirement, format_name\n\nif MYPY_CHECK_RUNNING:\n from typing import Sequence\n\n from 
pip._vendor.packaging.specifiers import SpecifierSet\n\n from pip._internal.req.req_install import InstallRequirement\n\n from .base import Candidate\n from .factory import Factory\n\n\nclass ExplicitRequirement(Requirement):\n def __init__(self, candidate):\n # type: (Candidate) -> None\n self.candidate = candidate\n\n def __repr__(self):\n # type: () -> str\n return \"{class_name}({candidate!r})\".format(\n class_name=self.__class__.__name__,\n candidate=self.candidate,\n )\n\n @property\n def name(self):\n # type: () -> str\n # No need to canonicalise - the candidate did this\n return self.candidate.name\n\n def find_matches(self):\n # type: () -> Sequence[Candidate]\n return [self.candidate]\n\n def is_satisfied_by(self, candidate):\n # type: (Candidate) -> bool\n return candidate == self.candidate\n\n\nclass SpecifierRequirement(Requirement):\n def __init__(self, ireq, factory):\n # type: (InstallRequirement, Factory) -> None\n assert ireq.link is None, \"This is a link, not a specifier\"\n self._ireq = ireq\n self._factory = factory\n self.extras = ireq.req.extras\n\n def __str__(self):\n # type: () -> str\n return str(self._ireq.req)\n\n def __repr__(self):\n # type: () -> str\n return \"{class_name}({requirement!r})\".format(\n class_name=self.__class__.__name__,\n requirement=str(self._ireq.req),\n )\n\n @property\n def name(self):\n # type: () -> str\n canonical_name = canonicalize_name(self._ireq.req.name)\n return format_name(canonical_name, self.extras)\n\n def find_matches(self):\n # type: () -> Sequence[Candidate]\n it = self._factory.iter_found_candidates(self._ireq, self.extras)\n return list(it)\n\n def is_satisfied_by(self, candidate):\n # type: (Candidate) -> bool\n assert candidate.name == self.name, \\\n \"Internal issue: Candidate is not for this requirement \" \\\n \" {} vs {}\".format(candidate.name, self.name)\n # We can safely always allow prereleases here since PackageFinder\n # already implements the prerelease logic, and would have filtered out\n # prerelease candidates if the user does not expect them.\n spec = self._ireq.req.specifier\n return spec.contains(candidate.version, prereleases=True)\n\n\nclass RequiresPythonRequirement(Requirement):\n \"\"\"A requirement representing Requires-Python metadata.\n \"\"\"\n def __init__(self, specifier, match):\n # type: (SpecifierSet, Candidate) -> None\n self.specifier = specifier\n self._candidate = match\n\n def __repr__(self):\n # type: () -> str\n return \"{class_name}({specifier!r})\".format(\n class_name=self.__class__.__name__,\n specifier=str(self.specifier),\n )\n\n @property\n def name(self):\n # type: () -> str\n return self._candidate.name\n\n def find_matches(self):\n # type: () -> Sequence[Candidate]\n if self._candidate.version in self.specifier:\n return [self._candidate]\n return []\n\n def is_satisfied_by(self, candidate):\n # type: (Candidate) -> bool\n assert candidate.name == self._candidate.name, \"Not Python candidate\"\n # We can safely always allow prereleases here since PackageFinder\n # already implements the prerelease logic, and would have filtered out\n # prerelease candidates if the user does not expect them.\n return self.specifier.contains(candidate.version, prereleases=True)\n", "path": "src/pip/_internal/resolution/resolvelib/requirements.py"}]}
| 1,970 | 324 |
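The patch in the record above swaps the `in` operator for `SpecifierSet.contains(..., prereleases=True)`. The difference is easy to see with the standalone `packaging` library (pip vendors the same code under `pip._vendor.packaging`); the version string is taken from the traceback, everything else is illustrative.

```python
# Demonstrates why `candidate.version in self._ireq.req.specifier` rejected the
# only available candidate: plain membership testing filters out pre-releases.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

candidate = Version("13.0.1.0.1.dev8")  # pre-release from the error message
spec = SpecifierSet("")                 # bare requirement, i.e. no version constraint

print(candidate in spec)                           # False -> InconsistentCandidate
print(spec.contains(candidate, prereleases=True))  # True  -> behaviour after the patch
```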
gh_patches_debug_5545
|
rasdani/github-patches
|
git_diff
|
tensorflow__tfx-3864
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update tensorflow-hub requirement to allow 0.12.0?
If the feature is related to a specific library below, please raise an issue in
the respective repo directly:
[TensorFlow Data Validation Repo](https://github.com/tensorflow/data-validation/issues)
[TensorFlow Model Analysis Repo](https://github.com/tensorflow/model-analysis/issues)
[TensorFlow Transform Repo](https://github.com/tensorflow/transform/issues)
[TensorFlow Serving Repo](https://github.com/tensorflow/serving/issues)
**System information**
- TFX Version (you are using): 1.0.0-rc0
- Environment in which you plan to use the feature (e.g., Local
(Linux/MacOS/Windows), Interactive Notebook, Google Cloud, etc..): MacOS, AWS
- Are you willing to contribute it (Yes/No): Yes
**Describe the feature and the current behavior/state.**
tfx (1.0.0-rc0) currently depends on tensorflow-hub (>=0.9.0,<0.10)
I was wondering if we could update the tensorflow-hub dependency for tfx to allow tf-hub 0.12.0, so something like (>=0.9.0,<=0.12.0)?
I am not sure if that would break anything in tfx, but I am happy to investigate and contribute to this change
**Will this change the current API? How?**
No
**Who will benefit with this feature?**
tensorflow-hub has added some new features in 0.10.0 and beyond (specifically the one I'm interested in "`compute_output_shape` in `hub.KerasLayer`" which they added in 0.12.0). It would be cool to be able to take advantage of those while still being able to use tfx
**Do you have a workaround or are completely blocked by this?** :
Blocked
**Name of your Organization (Optional)**
**Any Other info.**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tfx/dependencies.py`
Content:
```
1 # Copyright 2019 Google LLC. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Package dependencies for TFX.
15
16 tfx and family libraries (such as tensorflow-model-analysis) adopts environment
17 variable (TFX_DEPENDENCY_SELECTOR) based dependency version selection. This
18 dependency will be baked in to the wheel, in other words you cannot change
19 dependency string once wheel is built.
20
21 - UNCONSTRAINED uses dependency without any version constraint string, which is
22 useful when you manually build wheels of parent library (e.g. tfx-bsl) of
23 arbitrary version, and install it without dependency constraints conflict.
24 - NIGHTLY uses x.(y+1).0.dev version as a lower version constraint. tfx nightly
25 will transitively depend on nightly versions of other TFX family libraries,
26 and this version constraint is required.
27 - GIT_MASTER uses github master branch URL of the dependency, which is useful
28 during development, or when depending on the github master HEAD version of
29 tfx. This is because tfx github master HEAD version is actually using github
30 master HEAD version of parent libraries.
31 Caveat: URL dependency is not upgraded with --upgrade flag, and you have to
32 specify --force-reinstall flag to fetch the latest change from each master
33 branch HEAD.
34 - For the release, we use a range of version, which is also used as a default.
35 """
36 import os
37
38
39 def select_constraint(default, nightly=None, git_master=None):
40 """Select dependency constraint based on TFX_DEPENDENCY_SELECTOR env var."""
41 selector = os.environ.get('TFX_DEPENDENCY_SELECTOR')
42 if selector == 'UNCONSTRAINED':
43 return ''
44 elif selector == 'NIGHTLY' and nightly is not None:
45 return nightly
46 elif selector == 'GIT_MASTER' and git_master is not None:
47 return git_master
48 else:
49 return default
50
51
52 def make_pipeline_sdk_required_install_packages():
53 return [
54 'absl-py>=0.9,<0.13',
55 'ml-metadata' + select_constraint(
56 # LINT.IfChange
57 default='>=1.0.0,<1.1.0',
58 # LINT.ThenChange(tfx/workspace.bzl)
59 nightly='>=1.1.0.dev',
60 git_master='@git+https://github.com/google/ml-metadata@master'),
61 'packaging>=20,<21',
62 'portpicker>=1.3.1,<2',
63 'protobuf>=3.12.2,<4',
64 'docker>=4.1,<5',
65 # TODO(b/176812386): Deprecate usage of jinja2 for placeholders.
66 'jinja2>=2.7.3,<3',
67 ]
68
69
70 def make_required_install_packages():
71 # Make sure to sync the versions of common dependencies (absl-py, numpy,
72 # and protobuf) with TF.
73 return make_pipeline_sdk_required_install_packages() + [
74 'apache-beam[gcp]>=2.29,<3',
75 'attrs>=19.3.0,<21',
76 'click>=7,<8',
77 'google-api-python-client>=1.7.8,<2',
78 'google-cloud-aiplatform>=0.5.0,<0.8',
79 'google-cloud-bigquery>=1.28.0,<3',
80 'grpcio>=1.28.1,<2',
81 # TODO(b/173976603): remove pinned keras-tuner upperbound when its
82 # dependency expecatation with TensorFlow is sorted out.
83 'keras-tuner>=1,<1.0.2',
84 'kubernetes>=10.0.1,<12',
85 # TODO(b/179195488): remove numpy dependency after 1.20 migration.
86 # This dependency was added only to limit numpy 1.20 installation.
87 'numpy>=1.16,<1.20',
88 'pyarrow>=1,<3',
89 'pyyaml>=3.12,<6',
90 'tensorflow>=1.15.2,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,<3',
91 'tensorflow-hub>=0.9.0,<=0.12.0',
92 'tensorflow-data-validation' + select_constraint(
93 default='>=1.0.0,<1.1.0',
94 nightly='>=1.1.0.dev',
95 git_master='@git+https://github.com/tensorflow/data-validation@master'
96 ),
97 'tensorflow-model-analysis' + select_constraint(
98 default='>=0.31,<0.32',
99 nightly='>=0.32.0.dev',
100 git_master='@git+https://github.com/tensorflow/model-analysis@master'),
101 'tensorflow-serving-api>=1.15,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,<3',
102 'tensorflow-transform' + select_constraint(
103 default='>=1.0.0,<1.1.0',
104 nightly='>=1.1.0.dev',
105 git_master='@git+https://github.com/tensorflow/transform@master'),
106 'tfx-bsl' + select_constraint(
107 default='>=1.0.0,<1.1.0',
108 nightly='>=1.1.0.dev',
109 git_master='@git+https://github.com/tensorflow/tfx-bsl@master'),
110 ]
111
112
113 def make_extra_packages_airflow():
114 """Prepare extra packages needed for Apache Airflow orchestrator."""
115 return [
116 # TODO(b/188940096): update supported version.
117 'apache-airflow[mysql]>=1.10.14,<3',
118 # TODO(b/182848576): Delete pinned sqlalchemy after apache-airflow 2.0.2
119 # or later.(github.com/apache/airflow/issues/14811)
120 'sqlalchemy>=1.3,<1.4',
121 ]
122
123
124 def make_extra_packages_kfp():
125 """Prepare extra packages needed for Kubeflow Pipelines orchestrator."""
126 return [
127 'kfp>=1.6.1,<2',
128 'kfp-pipeline-spec>=0.1.7,<0.2',
129 ]
130
131
132 def make_extra_packages_test():
133 """Prepare extra packages needed for running unit tests."""
134 # Note: It is okay to pin packages to exact versions in this list to minimize
135 # conflicts.
136 return make_extra_packages_airflow() + make_extra_packages_kfp() + [
137 'pytest>=5,<6',
138 ]
139
140
141 def make_extra_packages_docker_image():
142 # Packages needed for tfx docker image.
143 return [
144 'kfp-pipeline-spec>=0.1.7,<0.2',
145 'mmh>=2.2,<3',
146 'python-snappy>=0.5,<0.6',
147 ]
148
149
150 def make_extra_packages_tfjs():
151 # Packages needed for tfjs.
152 return [
153 'tensorflowjs>=3.6.0,<4',
154 ]
155
156
157 def make_extra_packages_tf_ranking():
158 # Packages needed for tf-ranking which is used in tfx/examples/ranking.
159 return [
160 'tensorflow-ranking>=0.3.3,<0.4',
161 'struct2tensor' + select_constraint(
162 default='>=0.31,<0.32',
163 nightly='>=0.32.0.dev',
164 git_master='@git+https://github.com/google/struct2tensor@master'),
165 ]
166
167
168 def make_extra_packages_examples():
169 # Extra dependencies required for tfx/examples.
170 return [
171 # Required for presto ExampleGen custom component in
172 # tfx/examples/custom_components/presto_example_gen
173 'presto-python-client>=0.7,<0.8',
174 # Required for slack custom component in
175 # tfx/examples/custom_components/slack
176 'slackclient>=2.8.2,<3',
177 'websocket-client>=0.57,<1',
178 # Required for bert examples in tfx/examples/bert
179 'tensorflow-text>=1.15.1,<3',
180 # Required for tfx/examples/cifar10
181 'flatbuffers>=1.12,<2',
182 'tflite-support>=0.1.0a1,<0.1.1',
183 # Required for tfx/examples/penguin/experimental
184 # LINT.IfChange
185 'scikit-learn>=0.23,<0.24',
186 # LINT.ThenChange(
187 # examples/penguin/experimental/penguin_pipeline_sklearn_gcp.py)
188 # Required for the experimental tfx/examples using Flax, e.g.,
189 # tfx/examples/penguin.
190 'jax>=0.2.13,<0.3',
191 'jaxlib>=0.1.64,<0.2',
192 'flax>=0.3.3,<0.4',
193 # Required for tfx/examples/penguin/penguin_utils_cloud_tuner.py
194 'tensorflow-cloud>=0.1,<0.2',
195 ]
196
197
198 def make_extra_packages_all():
199 # All extra dependencies.
200 return [
201 *make_extra_packages_test(),
202 *make_extra_packages_tfjs(),
203 *make_extra_packages_tf_ranking(),
204 *make_extra_packages_examples(),
205 ]
206
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tfx/dependencies.py b/tfx/dependencies.py
--- a/tfx/dependencies.py
+++ b/tfx/dependencies.py
@@ -88,7 +88,7 @@
'pyarrow>=1,<3',
'pyyaml>=3.12,<6',
'tensorflow>=1.15.2,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,<3',
- 'tensorflow-hub>=0.9.0,<=0.12.0',
+ 'tensorflow-hub>=0.9.0,<0.13',
'tensorflow-data-validation' + select_constraint(
default='>=1.0.0,<1.1.0',
nightly='>=1.1.0.dev',
|
{"golden_diff": "diff --git a/tfx/dependencies.py b/tfx/dependencies.py\n--- a/tfx/dependencies.py\n+++ b/tfx/dependencies.py\n@@ -88,7 +88,7 @@\n 'pyarrow>=1,<3',\n 'pyyaml>=3.12,<6',\n 'tensorflow>=1.15.2,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,<3',\n- 'tensorflow-hub>=0.9.0,<=0.12.0',\n+ 'tensorflow-hub>=0.9.0,<0.13',\n 'tensorflow-data-validation' + select_constraint(\n default='>=1.0.0,<1.1.0',\n nightly='>=1.1.0.dev',\n", "issue": "Update tensorflow-hub requirement to allow 0.12.0?\nIf the feature is related to a specific library below, please raise an issue in\r\nthe respective repo directly:\r\n\r\n[TensorFlow Data Validation Repo](https://github.com/tensorflow/data-validation/issues)\r\n\r\n[TensorFlow Model Analysis Repo](https://github.com/tensorflow/model-analysis/issues)\r\n\r\n[TensorFlow Transform Repo](https://github.com/tensorflow/transform/issues)\r\n\r\n[TensorFlow Serving Repo](https://github.com/tensorflow/serving/issues)\r\n\r\n**System information**\r\n\r\n- TFX Version (you are using): 1.0.0-rc0\r\n- Environment in which you plan to use the feature (e.g., Local\r\n (Linux/MacOS/Windows), Interactive Notebook, Google Cloud, etc..): MacOS, AWS\r\n- Are you willing to contribute it (Yes/No): Yes\r\n\r\n**Describe the feature and the current behavior/state.**\r\ntfx (1.0.0-rc0) currently depends on tensorflow-hub (>=0.9.0,<0.10)\r\n\r\nI was wondering if we could update tensorflow-hub dependancy for tfx to allow tf-hub 0.12.0, so something like (>=0.9.0,<=0.12.0)?\r\n\r\nI am not sure if that would break anything in tfx, but I am happy to investigate and contribute to this change\r\n\r\n**Will this change the current API? How?**\r\nNo\r\n\r\n**Who will benefit with this feature?**\r\ntensorflow-hub has added some new features in 0.10.0 and beyond (specifically the one I'm interested in \"`compute_output_shape` in `hub.KerasLayer`\" which they added in 0.12.0). It would be cool to be able to take advantage of those while still being able to use tfx\r\n\r\n**Do you have a workaround or are completely blocked by this?** :\r\nBlocked\r\n\r\n**Name of your Organization (Optional)**\r\n\r\n\r\n**Any Other info.**\r\n\n", "before_files": [{"content": "# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Package dependencies for TFX.\n\ntfx and family libraries (such as tensorflow-model-analysis) adopts environment\nvariable (TFX_DEPENDENCY_SELECTOR) based dependency version selection. This\ndependency will be baked in to the wheel, in other words you cannot change\ndependency string once wheel is built.\n\n- UNCONSTRAINED uses dependency without any version constraint string, which is\n useful when you manually build wheels of parent library (e.g. tfx-bsl) of\n arbitrary version, and install it without dependency constraints conflict.\n- NIGHTLY uses x.(y+1).0.dev version as a lower version constraint. 
tfx nightly\n will transitively depend on nightly versions of other TFX family libraries,\n and this version constraint is required.\n- GIT_MASTER uses github master branch URL of the dependency, which is useful\n during development, or when depending on the github master HEAD version of\n tfx. This is because tfx github master HEAD version is actually using github\n master HEAD version of parent libraries.\n Caveat: URL dependency is not upgraded with --upgrade flag, and you have to\n specify --force-reinstall flag to fetch the latest change from each master\n branch HEAD.\n- For the release, we use a range of version, which is also used as a default.\n\"\"\"\nimport os\n\n\ndef select_constraint(default, nightly=None, git_master=None):\n \"\"\"Select dependency constraint based on TFX_DEPENDENCY_SELECTOR env var.\"\"\"\n selector = os.environ.get('TFX_DEPENDENCY_SELECTOR')\n if selector == 'UNCONSTRAINED':\n return ''\n elif selector == 'NIGHTLY' and nightly is not None:\n return nightly\n elif selector == 'GIT_MASTER' and git_master is not None:\n return git_master\n else:\n return default\n\n\ndef make_pipeline_sdk_required_install_packages():\n return [\n 'absl-py>=0.9,<0.13',\n 'ml-metadata' + select_constraint(\n # LINT.IfChange\n default='>=1.0.0,<1.1.0',\n # LINT.ThenChange(tfx/workspace.bzl)\n nightly='>=1.1.0.dev',\n git_master='@git+https://github.com/google/ml-metadata@master'),\n 'packaging>=20,<21',\n 'portpicker>=1.3.1,<2',\n 'protobuf>=3.12.2,<4',\n 'docker>=4.1,<5',\n # TODO(b/176812386): Deprecate usage of jinja2 for placeholders.\n 'jinja2>=2.7.3,<3',\n ]\n\n\ndef make_required_install_packages():\n # Make sure to sync the versions of common dependencies (absl-py, numpy,\n # and protobuf) with TF.\n return make_pipeline_sdk_required_install_packages() + [\n 'apache-beam[gcp]>=2.29,<3',\n 'attrs>=19.3.0,<21',\n 'click>=7,<8',\n 'google-api-python-client>=1.7.8,<2',\n 'google-cloud-aiplatform>=0.5.0,<0.8',\n 'google-cloud-bigquery>=1.28.0,<3',\n 'grpcio>=1.28.1,<2',\n # TODO(b/173976603): remove pinned keras-tuner upperbound when its\n # dependency expecatation with TensorFlow is sorted out.\n 'keras-tuner>=1,<1.0.2',\n 'kubernetes>=10.0.1,<12',\n # TODO(b/179195488): remove numpy dependency after 1.20 migration.\n # This dependency was added only to limit numpy 1.20 installation.\n 'numpy>=1.16,<1.20',\n 'pyarrow>=1,<3',\n 'pyyaml>=3.12,<6',\n 'tensorflow>=1.15.2,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,<3',\n 'tensorflow-hub>=0.9.0,<=0.12.0',\n 'tensorflow-data-validation' + select_constraint(\n default='>=1.0.0,<1.1.0',\n nightly='>=1.1.0.dev',\n git_master='@git+https://github.com/tensorflow/data-validation@master'\n ),\n 'tensorflow-model-analysis' + select_constraint(\n default='>=0.31,<0.32',\n nightly='>=0.32.0.dev',\n git_master='@git+https://github.com/tensorflow/model-analysis@master'),\n 'tensorflow-serving-api>=1.15,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,<3',\n 'tensorflow-transform' + select_constraint(\n default='>=1.0.0,<1.1.0',\n nightly='>=1.1.0.dev',\n git_master='@git+https://github.com/tensorflow/transform@master'),\n 'tfx-bsl' + select_constraint(\n default='>=1.0.0,<1.1.0',\n nightly='>=1.1.0.dev',\n git_master='@git+https://github.com/tensorflow/tfx-bsl@master'),\n ]\n\n\ndef make_extra_packages_airflow():\n \"\"\"Prepare extra packages needed for Apache Airflow orchestrator.\"\"\"\n return [\n # TODO(b/188940096): update supported version.\n 'apache-airflow[mysql]>=1.10.14,<3',\n # TODO(b/182848576): Delete pinned sqlalchemy after apache-airflow 
2.0.2\n # or later.(github.com/apache/airflow/issues/14811)\n 'sqlalchemy>=1.3,<1.4',\n ]\n\n\ndef make_extra_packages_kfp():\n \"\"\"Prepare extra packages needed for Kubeflow Pipelines orchestrator.\"\"\"\n return [\n 'kfp>=1.6.1,<2',\n 'kfp-pipeline-spec>=0.1.7,<0.2',\n ]\n\n\ndef make_extra_packages_test():\n \"\"\"Prepare extra packages needed for running unit tests.\"\"\"\n # Note: It is okay to pin packages to exact versions in this list to minimize\n # conflicts.\n return make_extra_packages_airflow() + make_extra_packages_kfp() + [\n 'pytest>=5,<6',\n ]\n\n\ndef make_extra_packages_docker_image():\n # Packages needed for tfx docker image.\n return [\n 'kfp-pipeline-spec>=0.1.7,<0.2',\n 'mmh>=2.2,<3',\n 'python-snappy>=0.5,<0.6',\n ]\n\n\ndef make_extra_packages_tfjs():\n # Packages needed for tfjs.\n return [\n 'tensorflowjs>=3.6.0,<4',\n ]\n\n\ndef make_extra_packages_tf_ranking():\n # Packages needed for tf-ranking which is used in tfx/examples/ranking.\n return [\n 'tensorflow-ranking>=0.3.3,<0.4',\n 'struct2tensor' + select_constraint(\n default='>=0.31,<0.32',\n nightly='>=0.32.0.dev',\n git_master='@git+https://github.com/google/struct2tensor@master'),\n ]\n\n\ndef make_extra_packages_examples():\n # Extra dependencies required for tfx/examples.\n return [\n # Required for presto ExampleGen custom component in\n # tfx/examples/custom_components/presto_example_gen\n 'presto-python-client>=0.7,<0.8',\n # Required for slack custom component in\n # tfx/examples/custom_components/slack\n 'slackclient>=2.8.2,<3',\n 'websocket-client>=0.57,<1',\n # Required for bert examples in tfx/examples/bert\n 'tensorflow-text>=1.15.1,<3',\n # Required for tfx/examples/cifar10\n 'flatbuffers>=1.12,<2',\n 'tflite-support>=0.1.0a1,<0.1.1',\n # Required for tfx/examples/penguin/experimental\n # LINT.IfChange\n 'scikit-learn>=0.23,<0.24',\n # LINT.ThenChange(\n # examples/penguin/experimental/penguin_pipeline_sklearn_gcp.py)\n # Required for the experimental tfx/examples using Flax, e.g.,\n # tfx/examples/penguin.\n 'jax>=0.2.13,<0.3',\n 'jaxlib>=0.1.64,<0.2',\n 'flax>=0.3.3,<0.4',\n # Required for tfx/examples/penguin/penguin_utils_cloud_tuner.py\n 'tensorflow-cloud>=0.1,<0.2',\n ]\n\n\ndef make_extra_packages_all():\n # All extra dependencies.\n return [\n *make_extra_packages_test(),\n *make_extra_packages_tfjs(),\n *make_extra_packages_tf_ranking(),\n *make_extra_packages_examples(),\n ]\n", "path": "tfx/dependencies.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Package dependencies for TFX.\n\ntfx and family libraries (such as tensorflow-model-analysis) adopts environment\nvariable (TFX_DEPENDENCY_SELECTOR) based dependency version selection. 
This\ndependency will be baked in to the wheel, in other words you cannot change\ndependency string once wheel is built.\n\n- UNCONSTRAINED uses dependency without any version constraint string, which is\n useful when you manually build wheels of parent library (e.g. tfx-bsl) of\n arbitrary version, and install it without dependency constraints conflict.\n- NIGHTLY uses x.(y+1).0.dev version as a lower version constraint. tfx nightly\n will transitively depend on nightly versions of other TFX family libraries,\n and this version constraint is required.\n- GIT_MASTER uses github master branch URL of the dependency, which is useful\n during development, or when depending on the github master HEAD version of\n tfx. This is because tfx github master HEAD version is actually using github\n master HEAD version of parent libraries.\n Caveat: URL dependency is not upgraded with --upgrade flag, and you have to\n specify --force-reinstall flag to fetch the latest change from each master\n branch HEAD.\n- For the release, we use a range of version, which is also used as a default.\n\"\"\"\nimport os\n\n\ndef select_constraint(default, nightly=None, git_master=None):\n \"\"\"Select dependency constraint based on TFX_DEPENDENCY_SELECTOR env var.\"\"\"\n selector = os.environ.get('TFX_DEPENDENCY_SELECTOR')\n if selector == 'UNCONSTRAINED':\n return ''\n elif selector == 'NIGHTLY' and nightly is not None:\n return nightly\n elif selector == 'GIT_MASTER' and git_master is not None:\n return git_master\n else:\n return default\n\n\ndef make_pipeline_sdk_required_install_packages():\n return [\n 'absl-py>=0.9,<0.13',\n 'ml-metadata' + select_constraint(\n # LINT.IfChange\n default='>=1.0.0,<1.1.0',\n # LINT.ThenChange(tfx/workspace.bzl)\n nightly='>=1.1.0.dev',\n git_master='@git+https://github.com/google/ml-metadata@master'),\n 'packaging>=20,<21',\n 'portpicker>=1.3.1,<2',\n 'protobuf>=3.12.2,<4',\n 'docker>=4.1,<5',\n # TODO(b/176812386): Deprecate usage of jinja2 for placeholders.\n 'jinja2>=2.7.3,<3',\n ]\n\n\ndef make_required_install_packages():\n # Make sure to sync the versions of common dependencies (absl-py, numpy,\n # and protobuf) with TF.\n return make_pipeline_sdk_required_install_packages() + [\n 'apache-beam[gcp]>=2.29,<3',\n 'attrs>=19.3.0,<21',\n 'click>=7,<8',\n 'google-api-python-client>=1.7.8,<2',\n 'google-cloud-aiplatform>=0.5.0,<0.8',\n 'google-cloud-bigquery>=1.28.0,<3',\n 'grpcio>=1.28.1,<2',\n # TODO(b/173976603): remove pinned keras-tuner upperbound when its\n # dependency expecatation with TensorFlow is sorted out.\n 'keras-tuner>=1,<1.0.2',\n 'kubernetes>=10.0.1,<12',\n # TODO(b/179195488): remove numpy dependency after 1.20 migration.\n # This dependency was added only to limit numpy 1.20 installation.\n 'numpy>=1.16,<1.20',\n 'pyarrow>=1,<3',\n 'pyyaml>=3.12,<6',\n 'tensorflow>=1.15.2,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,<3',\n 'tensorflow-hub>=0.9.0,<0.13',\n 'tensorflow-data-validation' + select_constraint(\n default='>=1.0.0,<1.1.0',\n nightly='>=1.1.0.dev',\n git_master='@git+https://github.com/tensorflow/data-validation@master'\n ),\n 'tensorflow-model-analysis' + select_constraint(\n default='>=0.31,<0.32',\n nightly='>=0.32.0.dev',\n git_master='@git+https://github.com/tensorflow/model-analysis@master'),\n 'tensorflow-serving-api>=1.15,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,<3',\n 'tensorflow-transform' + select_constraint(\n default='>=1.0.0,<1.1.0',\n nightly='>=1.1.0.dev',\n git_master='@git+https://github.com/tensorflow/transform@master'),\n 'tfx-bsl' + 
select_constraint(\n default='>=1.0.0,<1.1.0',\n nightly='>=1.1.0.dev',\n git_master='@git+https://github.com/tensorflow/tfx-bsl@master'),\n ]\n\n\ndef make_extra_packages_airflow():\n \"\"\"Prepare extra packages needed for Apache Airflow orchestrator.\"\"\"\n return [\n # TODO(b/188940096): update supported version.\n 'apache-airflow[mysql]>=1.10.14,<3',\n # TODO(b/182848576): Delete pinned sqlalchemy after apache-airflow 2.0.2\n # or later.(github.com/apache/airflow/issues/14811)\n 'sqlalchemy>=1.3,<1.4',\n ]\n\n\ndef make_extra_packages_kfp():\n \"\"\"Prepare extra packages needed for Kubeflow Pipelines orchestrator.\"\"\"\n return [\n 'kfp>=1.6.1,<2',\n 'kfp-pipeline-spec>=0.1.7,<0.2',\n ]\n\n\ndef make_extra_packages_test():\n \"\"\"Prepare extra packages needed for running unit tests.\"\"\"\n # Note: It is okay to pin packages to exact versions in this list to minimize\n # conflicts.\n return make_extra_packages_airflow() + make_extra_packages_kfp() + [\n 'pytest>=5,<6',\n ]\n\n\ndef make_extra_packages_docker_image():\n # Packages needed for tfx docker image.\n return [\n 'kfp-pipeline-spec>=0.1.7,<0.2',\n 'mmh>=2.2,<3',\n 'python-snappy>=0.5,<0.6',\n ]\n\n\ndef make_extra_packages_tfjs():\n # Packages needed for tfjs.\n return [\n 'tensorflowjs>=3.6.0,<4',\n ]\n\n\ndef make_extra_packages_tf_ranking():\n # Packages needed for tf-ranking which is used in tfx/examples/ranking.\n return [\n 'tensorflow-ranking>=0.3.3,<0.4',\n 'struct2tensor' + select_constraint(\n default='>=0.31,<0.32',\n nightly='>=0.32.0.dev',\n git_master='@git+https://github.com/google/struct2tensor@master'),\n ]\n\n\ndef make_extra_packages_examples():\n # Extra dependencies required for tfx/examples.\n return [\n # Required for presto ExampleGen custom component in\n # tfx/examples/custom_components/presto_example_gen\n 'presto-python-client>=0.7,<0.8',\n # Required for slack custom component in\n # tfx/examples/custom_components/slack\n 'slackclient>=2.8.2,<3',\n 'websocket-client>=0.57,<1',\n # Required for bert examples in tfx/examples/bert\n 'tensorflow-text>=1.15.1,<3',\n # Required for tfx/examples/cifar10\n 'flatbuffers>=1.12,<2',\n 'tflite-support>=0.1.0a1,<0.1.1',\n # Required for tfx/examples/penguin/experimental\n # LINT.IfChange\n 'scikit-learn>=0.23,<0.24',\n # LINT.ThenChange(\n # examples/penguin/experimental/penguin_pipeline_sklearn_gcp.py)\n # Required for the experimental tfx/examples using Flax, e.g.,\n # tfx/examples/penguin.\n 'jax>=0.2.13,<0.3',\n 'jaxlib>=0.1.64,<0.2',\n 'flax>=0.3.3,<0.4',\n # Required for tfx/examples/penguin/penguin_utils_cloud_tuner.py\n 'tensorflow-cloud>=0.1,<0.2',\n ]\n\n\ndef make_extra_packages_all():\n # All extra dependencies.\n return [\n *make_extra_packages_test(),\n *make_extra_packages_tfjs(),\n *make_extra_packages_tf_ranking(),\n *make_extra_packages_examples(),\n ]\n", "path": "tfx/dependencies.py"}]}
| 3,365 | 180 |
gh_patches_debug_7777 | rasdani/github-patches | git_diff | cowrie__cowrie-1685 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HPFeeds3 UnicodeDecodeError in ttylog.read().encode().hex()
**Describe the bug**
Stack Trace from the cowrie version v2.3.0, as already described in #1307
```
cowrie | 2022-01-23T14:52:17+0000 [twisted.logger._observer#critical] Temporarily disabling observer LegacyLogObserverWrapper(<bound method Output.emit of <cowrie.output.hpfeeds3.Output object at 0x7f4019656490>>) due to exception: [Failure instance: Traceback: <class 'UnicodeDecodeError'>: 'utf-8' codec can't decode byte 0x88 in position 16: invalid start byte
cowrie | /home/cowrie/cowrie/src/cowrie/insults/insults.py:226:connectionLost
cowrie | /usr/lib/python3.9/site-packages/twisted/python/threadable.py:51:sync
cowrie | /usr/lib/python3.9/site-packages/twisted/python/log.py:281:msg
cowrie | /usr/lib/python3.9/site-packages/twisted/logger/_legacy.py:147:publishToNewObserver
cowrie | --- <exception caught here> ---
cowrie | /usr/lib/python3.9/site-packages/twisted/logger/_observer.py:82:__call__
cowrie | /usr/lib/python3.9/site-packages/twisted/logger/_legacy.py:90:__call__
cowrie | /home/cowrie/cowrie/src/cowrie/core/output.py:240:emit
cowrie | /home/cowrie/cowrie/src/cowrie/output/hpfeeds3.py:110:write
cowrie | /usr/lib/python3.9/codecs.py:322:decode
cowrie | ]
cowrie | Traceback (most recent call last):
cowrie | File "/home/cowrie/cowrie/src/cowrie/insults/insults.py", line 226, in connectionLost
cowrie | log.msg(
cowrie | File "/usr/lib/python3.9/site-packages/twisted/python/threadable.py", line 51, in sync
cowrie | return function(self, *args, **kwargs)
cowrie | File "/usr/lib/python3.9/site-packages/twisted/python/log.py", line 281, in msg
cowrie | _publishNew(self._publishPublisher, actualEventDict, textFromEventDict)
cowrie | File "/usr/lib/python3.9/site-packages/twisted/logger/_legacy.py", line 147, in publishToNewObserver
cowrie | observer(eventDict)
cowrie | --- <exception caught here> ---
cowrie | File "/usr/lib/python3.9/site-packages/twisted/logger/_observer.py", line 82, in __call__
cowrie | observer(event)
cowrie | File "/usr/lib/python3.9/site-packages/twisted/logger/_legacy.py", line 90, in __call__
cowrie | self.legacyObserver(event)
cowrie | File "/home/cowrie/cowrie/src/cowrie/core/output.py", line 240, in emit
cowrie | self.write(ev)
cowrie | File "/home/cowrie/cowrie/src/cowrie/output/hpfeeds3.py", line 110, in write
cowrie | self.meta[session]["ttylog"] = ttylog.read().encode().hex()
cowrie | File "/usr/lib/python3.9/codecs.py", line 322, in decode
cowrie | (result, consumed) = self._buffer_decode(data, self.errors, final)
cowrie | builtins.UnicodeDecodeError: 'utf-8' codec can't decode byte 0x88 in position 16: invalid start byte
```
**Server (please complete the following information):**
- OS: Alpine Linux in Docker
- Python: Python 3.9
**Additional context**
The ttylog seems to be a binary file with only parts of it being text.
At the moment the file is opened as a text file, then encoded to utf-8 bytes and then to a hex representation. Opening it as a binary file and directly transforming it to a hex representation should fix it.
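
A minimal standalone sketch of the failure and of the suggested binary-mode fix (the temporary file and its byte contents are invented for illustration; only the change of read mode mirrors the suggestion above):

```python
import os
import tempfile

# Fake ttylog containing a byte (0x88) that is not valid UTF-8, similar to
# the real binary ttylog behind the traceback above.
with tempfile.NamedTemporaryFile(delete=False, suffix=".ttylog") as f:
    f.write(b"ls -la\r\n\x88\x1b[0m")
    path = f.name

# Current code path: text mode decodes (UTF-8 on the reporter's system) and raises.
try:
    with open(path, encoding="utf-8") as ttylog:
        ttylog.read().encode().hex()
except UnicodeDecodeError as exc:
    print("text mode fails:", exc)

# Suggested code path: binary mode skips decoding and hexlifies directly.
with open(path, "rb") as ttylog:
    print("binary mode ok:", ttylog.read().hex())

os.unlink(path)
```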
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cowrie/output/hpfeeds3.py`
Content:
```
1 """
2 Output plugin for HPFeeds
3 """
4
5 from __future__ import annotations
6
7 import json
8 import logging
9
10 from hpfeeds.twisted import ClientSessionService
11
12 from twisted.internet import endpoints, ssl
13 from twisted.internet import reactor # type: ignore
14 from twisted.python import log
15
16 import cowrie.core.output
17 from cowrie.core.config import CowrieConfig
18
19
20 class Output(cowrie.core.output.Output):
21 """
22 Output plugin for HPFeeds
23 """
24
25 channel = "cowrie.sessions"
26
27 def start(self):
28 if CowrieConfig.has_option("output_hpfeeds3", "channel"):
29 self.channel = CowrieConfig.get("output_hpfeeds3", "channel")
30
31 if CowrieConfig.has_option("output_hpfeeds3", "endpoint"):
32 endpoint = CowrieConfig.get("output_hpfeeds3", "endpoint")
33 else:
34 server = CowrieConfig.get("output_hpfeeds3", "server")
35 port = CowrieConfig.getint("output_hpfeeds3", "port")
36
37 if CowrieConfig.has_option("output_hpfeeds3", "tlscert"):
38 with open(CowrieConfig.get("output_hpfeeds3", "tlscert")) as fp:
39 authority = ssl.Certificate.loadPEM(fp.read())
40 options = ssl.optionsForClientTLS(server, authority)
41 endpoint = endpoints.SSL4ClientEndpoint(reactor, server, port, options)
42 else:
43 endpoint = endpoints.HostnameEndpoint(reactor, server, port)
44
45 ident = CowrieConfig.get("output_hpfeeds3", "identifier")
46 secret = CowrieConfig.get("output_hpfeeds3", "secret")
47
48 self.meta = {}
49
50 self.client = ClientSessionService(endpoint, ident, secret)
51 self.client.startService()
52
53 def stop(self):
54 self.client.stopService()
55
56 def write(self, entry):
57 session = entry["session"]
58 if entry["eventid"] == "cowrie.session.connect":
59 self.meta[session] = {
60 "session": session,
61 "startTime": entry["timestamp"],
62 "endTime": "",
63 "peerIP": entry["src_ip"],
64 "peerPort": entry["src_port"],
65 "hostIP": entry["dst_ip"],
66 "hostPort": entry["dst_port"],
67 "loggedin": None,
68 "credentials": [],
69 "commands": [],
70 "unknownCommands": [],
71 "urls": [],
72 "version": None,
73 "ttylog": None,
74 "hashes": set(),
75 "protocol": entry["protocol"],
76 }
77
78 elif entry["eventid"] == "cowrie.login.success":
79 u, p = entry["username"], entry["password"]
80 self.meta[session]["loggedin"] = (u, p)
81
82 elif entry["eventid"] == "cowrie.login.failed":
83 u, p = entry["username"], entry["password"]
84 self.meta[session]["credentials"].append((u, p))
85
86 elif entry["eventid"] == "cowrie.command.input":
87 c = entry["input"]
88 self.meta[session]["commands"].append(c)
89
90 elif entry["eventid"] == "cowrie.command.failed":
91 uc = entry["input"]
92 self.meta[session]["unknownCommands"].append(uc)
93
94 elif entry["eventid"] == "cowrie.session.file_download":
95 if "url" in entry:
96 url = entry["url"]
97 self.meta[session]["urls"].append(url)
98 self.meta[session]["hashes"].add(entry["shasum"])
99
100 elif entry["eventid"] == "cowrie.session.file_upload":
101 self.meta[session]["hashes"].add(entry["shasum"])
102
103 elif entry["eventid"] == "cowrie.client.version":
104 v = entry["version"]
105 self.meta[session]["version"] = v
106
107 elif entry["eventid"] == "cowrie.log.closed":
108 # entry["ttylog"]
109 with open(entry["ttylog"]) as ttylog:
110 self.meta[session]["ttylog"] = ttylog.read().encode().hex()
111
112 elif entry["eventid"] == "cowrie.session.closed":
113 meta = self.meta.pop(session, None)
114 if meta:
115 log.msg("publishing metadata to hpfeeds", logLevel=logging.DEBUG)
116 meta["endTime"] = entry["timestamp"]
117 meta["hashes"] = list(meta["hashes"])
118 self.client.publish(self.channel, json.dumps(meta).encode("utf-8"))
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cowrie/output/hpfeeds3.py b/src/cowrie/output/hpfeeds3.py
--- a/src/cowrie/output/hpfeeds3.py
+++ b/src/cowrie/output/hpfeeds3.py
@@ -106,8 +106,8 @@
elif entry["eventid"] == "cowrie.log.closed":
# entry["ttylog"]
- with open(entry["ttylog"]) as ttylog:
- self.meta[session]["ttylog"] = ttylog.read().encode().hex()
+ with open(entry["ttylog"], 'rb') as ttylog:
+ self.meta[session]["ttylog"] = ttylog.read().hex()
elif entry["eventid"] == "cowrie.session.closed":
meta = self.meta.pop(session, None)
|
{"golden_diff": "diff --git a/src/cowrie/output/hpfeeds3.py b/src/cowrie/output/hpfeeds3.py\n--- a/src/cowrie/output/hpfeeds3.py\n+++ b/src/cowrie/output/hpfeeds3.py\n@@ -106,8 +106,8 @@\n \n elif entry[\"eventid\"] == \"cowrie.log.closed\":\n # entry[\"ttylog\"]\n- with open(entry[\"ttylog\"]) as ttylog:\n- self.meta[session][\"ttylog\"] = ttylog.read().encode().hex()\n+ with open(entry[\"ttylog\"], 'rb') as ttylog:\n+ self.meta[session][\"ttylog\"] = ttylog.read().hex()\n \n elif entry[\"eventid\"] == \"cowrie.session.closed\":\n meta = self.meta.pop(session, None)\n", "issue": "HPFeeds3 UnicodeDecodeError in ttylog.read().encode().hex()\n**Describe the bug**\r\nStack Trace from the cowrie version v2.3.0, as already described in #1307 \r\n\r\n```\r\ncowrie | 2022-01-23T14:52:17+0000 [twisted.logger._observer#critical] Temporarily disabling observer LegacyLogObserverWrapper(<bound method Output.emit of <cowrie.output.hpfeeds3.Output object at 0x7f4019656490>>) due to exception: [Failure instance: Traceback: <class 'UnicodeDecodeError'>: 'utf-8' codec can't decode byte 0x88 in position 16: invalid start byte\r\ncowrie | \t/home/cowrie/cowrie/src/cowrie/insults/insults.py:226:connectionLost\r\ncowrie | \t/usr/lib/python3.9/site-packages/twisted/python/threadable.py:51:sync\r\ncowrie | \t/usr/lib/python3.9/site-packages/twisted/python/log.py:281:msg\r\ncowrie | \t/usr/lib/python3.9/site-packages/twisted/logger/_legacy.py:147:publishToNewObserver\r\ncowrie | \t--- <exception caught here> ---\r\ncowrie | \t/usr/lib/python3.9/site-packages/twisted/logger/_observer.py:82:__call__\r\ncowrie | \t/usr/lib/python3.9/site-packages/twisted/logger/_legacy.py:90:__call__\r\ncowrie | \t/home/cowrie/cowrie/src/cowrie/core/output.py:240:emit\r\ncowrie | \t/home/cowrie/cowrie/src/cowrie/output/hpfeeds3.py:110:write\r\ncowrie | \t/usr/lib/python3.9/codecs.py:322:decode\r\ncowrie | \t]\r\ncowrie | \tTraceback (most recent call last):\r\ncowrie | \t File \"/home/cowrie/cowrie/src/cowrie/insults/insults.py\", line 226, in connectionLost\r\ncowrie | \t log.msg(\r\ncowrie | \t File \"/usr/lib/python3.9/site-packages/twisted/python/threadable.py\", line 51, in sync\r\ncowrie | \t return function(self, *args, **kwargs)\r\ncowrie | \t File \"/usr/lib/python3.9/site-packages/twisted/python/log.py\", line 281, in msg\r\ncowrie | \t _publishNew(self._publishPublisher, actualEventDict, textFromEventDict)\r\ncowrie | \t File \"/usr/lib/python3.9/site-packages/twisted/logger/_legacy.py\", line 147, in publishToNewObserver\r\ncowrie | \t observer(eventDict)\r\ncowrie | \t--- <exception caught here> ---\r\ncowrie | \t File \"/usr/lib/python3.9/site-packages/twisted/logger/_observer.py\", line 82, in __call__\r\ncowrie | \t observer(event)\r\ncowrie | \t File \"/usr/lib/python3.9/site-packages/twisted/logger/_legacy.py\", line 90, in __call__\r\ncowrie | \t self.legacyObserver(event)\r\ncowrie | \t File \"/home/cowrie/cowrie/src/cowrie/core/output.py\", line 240, in emit\r\ncowrie | \t self.write(ev)\r\ncowrie | \t File \"/home/cowrie/cowrie/src/cowrie/output/hpfeeds3.py\", line 110, in write\r\ncowrie | \t self.meta[session][\"ttylog\"] = ttylog.read().encode().hex()\r\ncowrie | \t File \"/usr/lib/python3.9/codecs.py\", line 322, in decode\r\ncowrie | \t (result, consumed) = self._buffer_decode(data, self.errors, final)\r\ncowrie | \tbuiltins.UnicodeDecodeError: 'utf-8' codec can't decode byte 0x88 in position 16: invalid start byte\r\n```\r\n\r\n**Server (please complete the following information):**\r\n - OS: Alpine Linux 
in Docker\r\n - Python: Python 3.9\r\n\r\n**Additional context**\r\nThe ttylog seems to be a binary file with only parts of it being text. \r\n\r\nAt the moment the file is opened as a text file, then encoded to utf-8 bytes and then to a hex representation. Opening it as a binary file and directly transforming it to a hex reprenstation should fix it.\nHPFeeds3 UnicodeDecodeError in ttylog.read().encode().hex()\n**Describe the bug**\r\nStack Trace from the cowrie version v2.3.0, as already described in #1307 \r\n\r\n```\r\ncowrie | 2022-01-23T14:52:17+0000 [twisted.logger._observer#critical] Temporarily disabling observer LegacyLogObserverWrapper(<bound method Output.emit of <cowrie.output.hpfeeds3.Output object at 0x7f4019656490>>) due to exception: [Failure instance: Traceback: <class 'UnicodeDecodeError'>: 'utf-8' codec can't decode byte 0x88 in position 16: invalid start byte\r\ncowrie | \t/home/cowrie/cowrie/src/cowrie/insults/insults.py:226:connectionLost\r\ncowrie | \t/usr/lib/python3.9/site-packages/twisted/python/threadable.py:51:sync\r\ncowrie | \t/usr/lib/python3.9/site-packages/twisted/python/log.py:281:msg\r\ncowrie | \t/usr/lib/python3.9/site-packages/twisted/logger/_legacy.py:147:publishToNewObserver\r\ncowrie | \t--- <exception caught here> ---\r\ncowrie | \t/usr/lib/python3.9/site-packages/twisted/logger/_observer.py:82:__call__\r\ncowrie | \t/usr/lib/python3.9/site-packages/twisted/logger/_legacy.py:90:__call__\r\ncowrie | \t/home/cowrie/cowrie/src/cowrie/core/output.py:240:emit\r\ncowrie | \t/home/cowrie/cowrie/src/cowrie/output/hpfeeds3.py:110:write\r\ncowrie | \t/usr/lib/python3.9/codecs.py:322:decode\r\ncowrie | \t]\r\ncowrie | \tTraceback (most recent call last):\r\ncowrie | \t File \"/home/cowrie/cowrie/src/cowrie/insults/insults.py\", line 226, in connectionLost\r\ncowrie | \t log.msg(\r\ncowrie | \t File \"/usr/lib/python3.9/site-packages/twisted/python/threadable.py\", line 51, in sync\r\ncowrie | \t return function(self, *args, **kwargs)\r\ncowrie | \t File \"/usr/lib/python3.9/site-packages/twisted/python/log.py\", line 281, in msg\r\ncowrie | \t _publishNew(self._publishPublisher, actualEventDict, textFromEventDict)\r\ncowrie | \t File \"/usr/lib/python3.9/site-packages/twisted/logger/_legacy.py\", line 147, in publishToNewObserver\r\ncowrie | \t observer(eventDict)\r\ncowrie | \t--- <exception caught here> ---\r\ncowrie | \t File \"/usr/lib/python3.9/site-packages/twisted/logger/_observer.py\", line 82, in __call__\r\ncowrie | \t observer(event)\r\ncowrie | \t File \"/usr/lib/python3.9/site-packages/twisted/logger/_legacy.py\", line 90, in __call__\r\ncowrie | \t self.legacyObserver(event)\r\ncowrie | \t File \"/home/cowrie/cowrie/src/cowrie/core/output.py\", line 240, in emit\r\ncowrie | \t self.write(ev)\r\ncowrie | \t File \"/home/cowrie/cowrie/src/cowrie/output/hpfeeds3.py\", line 110, in write\r\ncowrie | \t self.meta[session][\"ttylog\"] = ttylog.read().encode().hex()\r\ncowrie | \t File \"/usr/lib/python3.9/codecs.py\", line 322, in decode\r\ncowrie | \t (result, consumed) = self._buffer_decode(data, self.errors, final)\r\ncowrie | \tbuiltins.UnicodeDecodeError: 'utf-8' codec can't decode byte 0x88 in position 16: invalid start byte\r\n```\r\n\r\n**Server (please complete the following information):**\r\n - OS: Alpine Linux in Docker\r\n - Python: Python 3.9\r\n\r\n**Additional context**\r\nThe ttylog seems to be a binary file with only parts of it being text. 
\r\n\r\nAt the moment the file is opened as a text file, then encoded to utf-8 bytes and then to a hex representation. Opening it as a binary file and directly transforming it to a hex reprenstation should fix it.\n", "before_files": [{"content": "\"\"\"\nOutput plugin for HPFeeds\n\"\"\"\n\nfrom __future__ import annotations\n\nimport json\nimport logging\n\nfrom hpfeeds.twisted import ClientSessionService\n\nfrom twisted.internet import endpoints, ssl\nfrom twisted.internet import reactor # type: ignore\nfrom twisted.python import log\n\nimport cowrie.core.output\nfrom cowrie.core.config import CowrieConfig\n\n\nclass Output(cowrie.core.output.Output):\n \"\"\"\n Output plugin for HPFeeds\n \"\"\"\n\n channel = \"cowrie.sessions\"\n\n def start(self):\n if CowrieConfig.has_option(\"output_hpfeeds3\", \"channel\"):\n self.channel = CowrieConfig.get(\"output_hpfeeds3\", \"channel\")\n\n if CowrieConfig.has_option(\"output_hpfeeds3\", \"endpoint\"):\n endpoint = CowrieConfig.get(\"output_hpfeeds3\", \"endpoint\")\n else:\n server = CowrieConfig.get(\"output_hpfeeds3\", \"server\")\n port = CowrieConfig.getint(\"output_hpfeeds3\", \"port\")\n\n if CowrieConfig.has_option(\"output_hpfeeds3\", \"tlscert\"):\n with open(CowrieConfig.get(\"output_hpfeeds3\", \"tlscert\")) as fp:\n authority = ssl.Certificate.loadPEM(fp.read())\n options = ssl.optionsForClientTLS(server, authority)\n endpoint = endpoints.SSL4ClientEndpoint(reactor, server, port, options)\n else:\n endpoint = endpoints.HostnameEndpoint(reactor, server, port)\n\n ident = CowrieConfig.get(\"output_hpfeeds3\", \"identifier\")\n secret = CowrieConfig.get(\"output_hpfeeds3\", \"secret\")\n\n self.meta = {}\n\n self.client = ClientSessionService(endpoint, ident, secret)\n self.client.startService()\n\n def stop(self):\n self.client.stopService()\n\n def write(self, entry):\n session = entry[\"session\"]\n if entry[\"eventid\"] == \"cowrie.session.connect\":\n self.meta[session] = {\n \"session\": session,\n \"startTime\": entry[\"timestamp\"],\n \"endTime\": \"\",\n \"peerIP\": entry[\"src_ip\"],\n \"peerPort\": entry[\"src_port\"],\n \"hostIP\": entry[\"dst_ip\"],\n \"hostPort\": entry[\"dst_port\"],\n \"loggedin\": None,\n \"credentials\": [],\n \"commands\": [],\n \"unknownCommands\": [],\n \"urls\": [],\n \"version\": None,\n \"ttylog\": None,\n \"hashes\": set(),\n \"protocol\": entry[\"protocol\"],\n }\n\n elif entry[\"eventid\"] == \"cowrie.login.success\":\n u, p = entry[\"username\"], entry[\"password\"]\n self.meta[session][\"loggedin\"] = (u, p)\n\n elif entry[\"eventid\"] == \"cowrie.login.failed\":\n u, p = entry[\"username\"], entry[\"password\"]\n self.meta[session][\"credentials\"].append((u, p))\n\n elif entry[\"eventid\"] == \"cowrie.command.input\":\n c = entry[\"input\"]\n self.meta[session][\"commands\"].append(c)\n\n elif entry[\"eventid\"] == \"cowrie.command.failed\":\n uc = entry[\"input\"]\n self.meta[session][\"unknownCommands\"].append(uc)\n\n elif entry[\"eventid\"] == \"cowrie.session.file_download\":\n if \"url\" in entry:\n url = entry[\"url\"]\n self.meta[session][\"urls\"].append(url)\n self.meta[session][\"hashes\"].add(entry[\"shasum\"])\n\n elif entry[\"eventid\"] == \"cowrie.session.file_upload\":\n self.meta[session][\"hashes\"].add(entry[\"shasum\"])\n\n elif entry[\"eventid\"] == \"cowrie.client.version\":\n v = entry[\"version\"]\n self.meta[session][\"version\"] = v\n\n elif entry[\"eventid\"] == \"cowrie.log.closed\":\n # entry[\"ttylog\"]\n with open(entry[\"ttylog\"]) as ttylog:\n 
self.meta[session][\"ttylog\"] = ttylog.read().encode().hex()\n\n elif entry[\"eventid\"] == \"cowrie.session.closed\":\n meta = self.meta.pop(session, None)\n if meta:\n log.msg(\"publishing metadata to hpfeeds\", logLevel=logging.DEBUG)\n meta[\"endTime\"] = entry[\"timestamp\"]\n meta[\"hashes\"] = list(meta[\"hashes\"])\n self.client.publish(self.channel, json.dumps(meta).encode(\"utf-8\"))\n", "path": "src/cowrie/output/hpfeeds3.py"}], "after_files": [{"content": "\"\"\"\nOutput plugin for HPFeeds\n\"\"\"\n\nfrom __future__ import annotations\n\nimport json\nimport logging\n\nfrom hpfeeds.twisted import ClientSessionService\n\nfrom twisted.internet import endpoints, ssl\nfrom twisted.internet import reactor # type: ignore\nfrom twisted.python import log\n\nimport cowrie.core.output\nfrom cowrie.core.config import CowrieConfig\n\n\nclass Output(cowrie.core.output.Output):\n \"\"\"\n Output plugin for HPFeeds\n \"\"\"\n\n channel = \"cowrie.sessions\"\n\n def start(self):\n if CowrieConfig.has_option(\"output_hpfeeds3\", \"channel\"):\n self.channel = CowrieConfig.get(\"output_hpfeeds3\", \"channel\")\n\n if CowrieConfig.has_option(\"output_hpfeeds3\", \"endpoint\"):\n endpoint = CowrieConfig.get(\"output_hpfeeds3\", \"endpoint\")\n else:\n server = CowrieConfig.get(\"output_hpfeeds3\", \"server\")\n port = CowrieConfig.getint(\"output_hpfeeds3\", \"port\")\n\n if CowrieConfig.has_option(\"output_hpfeeds3\", \"tlscert\"):\n with open(CowrieConfig.get(\"output_hpfeeds3\", \"tlscert\")) as fp:\n authority = ssl.Certificate.loadPEM(fp.read())\n options = ssl.optionsForClientTLS(server, authority)\n endpoint = endpoints.SSL4ClientEndpoint(reactor, server, port, options)\n else:\n endpoint = endpoints.HostnameEndpoint(reactor, server, port)\n\n ident = CowrieConfig.get(\"output_hpfeeds3\", \"identifier\")\n secret = CowrieConfig.get(\"output_hpfeeds3\", \"secret\")\n\n self.meta = {}\n\n self.client = ClientSessionService(endpoint, ident, secret)\n self.client.startService()\n\n def stop(self):\n self.client.stopService()\n\n def write(self, entry):\n session = entry[\"session\"]\n if entry[\"eventid\"] == \"cowrie.session.connect\":\n self.meta[session] = {\n \"session\": session,\n \"startTime\": entry[\"timestamp\"],\n \"endTime\": \"\",\n \"peerIP\": entry[\"src_ip\"],\n \"peerPort\": entry[\"src_port\"],\n \"hostIP\": entry[\"dst_ip\"],\n \"hostPort\": entry[\"dst_port\"],\n \"loggedin\": None,\n \"credentials\": [],\n \"commands\": [],\n \"unknownCommands\": [],\n \"urls\": [],\n \"version\": None,\n \"ttylog\": None,\n \"hashes\": set(),\n \"protocol\": entry[\"protocol\"],\n }\n\n elif entry[\"eventid\"] == \"cowrie.login.success\":\n u, p = entry[\"username\"], entry[\"password\"]\n self.meta[session][\"loggedin\"] = (u, p)\n\n elif entry[\"eventid\"] == \"cowrie.login.failed\":\n u, p = entry[\"username\"], entry[\"password\"]\n self.meta[session][\"credentials\"].append((u, p))\n\n elif entry[\"eventid\"] == \"cowrie.command.input\":\n c = entry[\"input\"]\n self.meta[session][\"commands\"].append(c)\n\n elif entry[\"eventid\"] == \"cowrie.command.failed\":\n uc = entry[\"input\"]\n self.meta[session][\"unknownCommands\"].append(uc)\n\n elif entry[\"eventid\"] == \"cowrie.session.file_download\":\n if \"url\" in entry:\n url = entry[\"url\"]\n self.meta[session][\"urls\"].append(url)\n self.meta[session][\"hashes\"].add(entry[\"shasum\"])\n\n elif entry[\"eventid\"] == \"cowrie.session.file_upload\":\n self.meta[session][\"hashes\"].add(entry[\"shasum\"])\n\n elif 
entry[\"eventid\"] == \"cowrie.client.version\":\n v = entry[\"version\"]\n self.meta[session][\"version\"] = v\n\n elif entry[\"eventid\"] == \"cowrie.log.closed\":\n # entry[\"ttylog\"]\n with open(entry[\"ttylog\"], 'rb') as ttylog:\n self.meta[session][\"ttylog\"] = ttylog.read().hex()\n\n elif entry[\"eventid\"] == \"cowrie.session.closed\":\n meta = self.meta.pop(session, None)\n if meta:\n log.msg(\"publishing metadata to hpfeeds\", logLevel=logging.DEBUG)\n meta[\"endTime\"] = entry[\"timestamp\"]\n meta[\"hashes\"] = list(meta[\"hashes\"])\n self.client.publish(self.channel, json.dumps(meta).encode(\"utf-8\"))\n", "path": "src/cowrie/output/hpfeeds3.py"}]}
| 3,513 | 178 |
gh_patches_debug_18915 | rasdani/github-patches | git_diff | dotkom__onlineweb4-2123 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Links to company websites doesn't work without http://
On a company profile page the link to the company's website will only redirect the user if `http://` is specified when the link is added in the dashboard. For example, the link to AppearTV is written as `www.appeartv.com`, and redirects to `https://online.ntnu.no/company/60/www.appeartv.com`.
There is no information to the user creating an event to add http either, so I can imagine this being a growing problem.
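
One common form-layer remedy is Django's `URLField`, whose cleaning step assumes an `http` scheme when none is given. A rough standalone sketch follows; the settings boilerplate is only there so the snippet runs outside a project, and the exact normalisation behaviour depends on the Django version in use:

```python
import django
from django.conf import settings

# Throwaway configuration so the form field can be used outside a project.
if not settings.configured:
    settings.configure(USE_I18N=False)
    django.setup()

from django.forms.fields import URLField

site = URLField(max_length=100)

# A schemeless value is normalised during cleaning, so the stored link no
# longer turns into a relative URL on the profile page.
print(site.clean("www.appeartv.com"))        # e.g. "http://www.appeartv.com"
print(site.clean("https://online.ntnu.no"))  # scheme already present, kept as-is
```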
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/companyprofile/dashboard/forms.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from django.forms import ModelForm
3
4 from apps.companyprofile.models import Company
5 from apps.dashboard.widgets import widget_generator
6 from apps.gallery.widgets import SingleImageInput
7
8
9 class CompanyForm(ModelForm):
10
11 class Meta(object):
12 model = Company
13 fields = ('name', 'short_description', 'long_description', 'image', 'site', 'email_address', 'phone_number',)
14 exclude = ['old_image']
15
16 # Widget generator accepts a form widget, and a list of tuples between field name and an attribute dict
17 widgets = widget_generator(SingleImageInput, [('image', {'id': 'responsive-image-id'})])
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/apps/companyprofile/dashboard/forms.py b/apps/companyprofile/dashboard/forms.py
--- a/apps/companyprofile/dashboard/forms.py
+++ b/apps/companyprofile/dashboard/forms.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
from django.forms import ModelForm
+from django.forms.fields import URLField
from apps.companyprofile.models import Company
from apps.dashboard.widgets import widget_generator
@@ -7,10 +8,12 @@
class CompanyForm(ModelForm):
+ site = URLField(max_length=100)
class Meta(object):
model = Company
fields = ('name', 'short_description', 'long_description', 'image', 'site', 'email_address', 'phone_number',)
+
exclude = ['old_image']
# Widget generator accepts a form widget, and a list of tuples between field name and an attribute dict
|
{"golden_diff": "diff --git a/apps/companyprofile/dashboard/forms.py b/apps/companyprofile/dashboard/forms.py\n--- a/apps/companyprofile/dashboard/forms.py\n+++ b/apps/companyprofile/dashboard/forms.py\n@@ -1,5 +1,6 @@\n # -*- coding: utf-8 -*-\n from django.forms import ModelForm\n+from django.forms.fields import URLField\n \n from apps.companyprofile.models import Company\n from apps.dashboard.widgets import widget_generator\n@@ -7,10 +8,12 @@\n \n \n class CompanyForm(ModelForm):\n+ site = URLField(max_length=100)\n \n class Meta(object):\n model = Company\n fields = ('name', 'short_description', 'long_description', 'image', 'site', 'email_address', 'phone_number',)\n+\n exclude = ['old_image']\n \n # Widget generator accepts a form widget, and a list of tuples between field name and an attribute dict\n", "issue": "Links to company websites doesn't work without http:// \nOn a company profile page the link to the company's website will only redirect the user if `http://` is specified when the link is added in the dashboard. For example, the link to AppearTV is written as `www.appeartv.com`, and redirects to `https://online.ntnu.no/company/60/www.appeartv.com`.\nThere is no information to the user creating an event to add http either, so I can imagine this being a growing problem. \n\nLinks to company websites doesn't work without http:// \nOn a company profile page the link to the company's website will only redirect the user if `http://` is specified when the link is added in the dashboard. For example, the link to AppearTV is written as `www.appeartv.com`, and redirects to `https://online.ntnu.no/company/60/www.appeartv.com`.\nThere is no information to the user creating an event to add http either, so I can imagine this being a growing problem. \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom django.forms import ModelForm\n\nfrom apps.companyprofile.models import Company\nfrom apps.dashboard.widgets import widget_generator\nfrom apps.gallery.widgets import SingleImageInput\n\n\nclass CompanyForm(ModelForm):\n\n class Meta(object):\n model = Company\n fields = ('name', 'short_description', 'long_description', 'image', 'site', 'email_address', 'phone_number',)\n exclude = ['old_image']\n\n # Widget generator accepts a form widget, and a list of tuples between field name and an attribute dict\n widgets = widget_generator(SingleImageInput, [('image', {'id': 'responsive-image-id'})])\n", "path": "apps/companyprofile/dashboard/forms.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom django.forms import ModelForm\nfrom django.forms.fields import URLField\n\nfrom apps.companyprofile.models import Company\nfrom apps.dashboard.widgets import widget_generator\nfrom apps.gallery.widgets import SingleImageInput\n\n\nclass CompanyForm(ModelForm):\n site = URLField(max_length=100)\n\n class Meta(object):\n model = Company\n fields = ('name', 'short_description', 'long_description', 'image', 'site', 'email_address', 'phone_number',)\n\n exclude = ['old_image']\n\n # Widget generator accepts a form widget, and a list of tuples between field name and an attribute dict\n widgets = widget_generator(SingleImageInput, [('image', {'id': 'responsive-image-id'})])\n", "path": "apps/companyprofile/dashboard/forms.py"}]}
| 649 | 188 |
gh_patches_debug_32850 | rasdani/github-patches | git_diff | iterative__dvc-3316 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dvc push -q is not completely quiet, shows file transfer tqdm progress bars
Hey guys, love what you've done with DVC.
Had a quick bug that's causing me a little issue. When I use 'dvc push -q' I'm still seeing tqdm progress bars. Wouldn't be a huge issue, but I'm probably pushing 100K 250kb files. This is a local remote, so the transfer speeds are quick. I know in some of my other scripts where I use tqdm, if the iteration time is very small, the tqdm overhead of writing to stdout actually starts to hurt performance.
dvc version: 0.83.0
os: Windows 10

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/repo/add.py`
Content:
```
1 import logging
2 import os
3
4 import colorama
5
6 from . import locked
7 from ..exceptions import (
8 RecursiveAddingWhileUsingFilename,
9 OverlappingOutputPathsError,
10 )
11 from ..output.base import OutputDoesNotExistError
12 from ..progress import Tqdm
13 from ..repo.scm_context import scm_context
14 from ..stage import Stage
15 from ..utils import LARGE_DIR_SIZE
16
17 logger = logging.getLogger(__name__)
18
19
20 @locked
21 @scm_context
22 def add(repo, targets, recursive=False, no_commit=False, fname=None):
23 if recursive and fname:
24 raise RecursiveAddingWhileUsingFilename()
25
26 if isinstance(targets, str):
27 targets = [targets]
28
29 stages_list = []
30 num_targets = len(targets)
31 with Tqdm(total=num_targets, desc="Add", unit="file", leave=True) as pbar:
32 if num_targets == 1:
33 # clear unneeded top-level progress bar for single target
34 pbar.bar_format = "Adding..."
35 pbar.refresh()
36 for target in targets:
37 sub_targets = _find_all_targets(repo, target, recursive)
38 pbar.total += len(sub_targets) - 1
39
40 if os.path.isdir(target) and len(sub_targets) > LARGE_DIR_SIZE:
41 logger.warning(
42 "You are adding a large directory '{target}' recursively,"
43 " consider tracking it as a whole instead.\n"
44 "{purple}HINT:{nc} Remove the generated DVC-file and then"
45 " run `{cyan}dvc add {target}{nc}`".format(
46 purple=colorama.Fore.MAGENTA,
47 cyan=colorama.Fore.CYAN,
48 nc=colorama.Style.RESET_ALL,
49 target=target,
50 )
51 )
52
53 stages = _create_stages(repo, sub_targets, fname, pbar=pbar)
54
55 try:
56 repo.check_modified_graph(stages)
57 except OverlappingOutputPathsError as exc:
58 msg = (
59 "Cannot add '{out}', because it is overlapping with other "
60 "DVC tracked output: '{parent}'.\n"
61 "To include '{out}' in '{parent}', run "
62 "'dvc commit {parent_stage}'"
63 ).format(
64 out=exc.overlapping_out.path_info,
65 parent=exc.parent.path_info,
66 parent_stage=exc.parent.stage.relpath,
67 )
68 raise OverlappingOutputPathsError(
69 exc.parent, exc.overlapping_out, msg
70 )
71
72 with Tqdm(
73 total=len(stages),
74 desc="Processing",
75 unit="file",
76 disable=True if len(stages) == 1 else None,
77 ) as pbar_stages:
78 for stage in stages:
79 try:
80 stage.save()
81 except OutputDoesNotExistError:
82 pbar.n -= 1
83 raise
84
85 if not no_commit:
86 stage.commit()
87
88 stage.dump()
89 pbar_stages.update()
90
91 stages_list += stages
92
93 if num_targets == 1: # restore bar format for stats
94 pbar.bar_format = pbar.BAR_FMT_DEFAULT
95
96 return stages_list
97
98
99 def _find_all_targets(repo, target, recursive):
100 if os.path.isdir(target) and recursive:
101 return [
102 fname
103 for fname in Tqdm(
104 repo.tree.walk_files(target),
105 desc="Searching " + target,
106 bar_format=Tqdm.BAR_FMT_NOTOTAL,
107 unit="file",
108 )
109 if not repo.is_dvc_internal(fname)
110 if not Stage.is_stage_file(fname)
111 if not repo.scm.belongs_to_scm(fname)
112 if not repo.scm.is_tracked(fname)
113 ]
114 return [target]
115
116
117 def _create_stages(repo, targets, fname, pbar=None):
118 stages = []
119
120 for out in Tqdm(
121 targets,
122 desc="Creating DVC-files",
123 disable=True if len(targets) < LARGE_DIR_SIZE else None,
124 unit="file",
125 ):
126 stage = Stage.create(
127 repo, outs=[out], accompany_outs=True, fname=fname
128 )
129
130 if not stage:
131 if pbar is not None:
132 pbar.total -= 1
133 continue
134
135 stages.append(stage)
136 if pbar is not None:
137 pbar.update_desc(out)
138
139 return stages
140
```
Path: `dvc/progress.py`
Content:
```
1 """Manages progress bars for DVC repo."""
2
3 import logging
4 import sys
5 from threading import RLock
6
7 from tqdm import tqdm
8
9 from dvc.utils import env2bool
10
11 logger = logging.getLogger(__name__)
12 tqdm.set_lock(RLock())
13
14
15 class Tqdm(tqdm):
16 """
17 maximum-compatibility tqdm-based progressbars
18 """
19
20 BAR_FMT_DEFAULT = (
21 "{percentage:3.0f}% {desc}|{bar}|"
22 "{n_fmt}/{total_fmt}"
23 " [{elapsed}<{remaining}, {rate_fmt:>11}{postfix}]"
24 )
25 # nested bars should have fixed bar widths to align nicely
26 BAR_FMT_DEFAULT_NESTED = (
27 "{percentage:3.0f}%|{bar:10}|{desc:{ncols_desc}.{ncols_desc}}"
28 "{n_fmt}/{total_fmt}"
29 " [{elapsed}<{remaining}, {rate_fmt:>11}{postfix}]"
30 )
31 BAR_FMT_NOTOTAL = (
32 "{desc:{ncols_desc}.{ncols_desc}}{n_fmt}"
33 " [{elapsed}, {rate_fmt:>11}{postfix}]"
34 )
35 BYTES_DEFAULTS = dict(
36 unit="B", unit_scale=True, unit_divisor=1024, miniters=1
37 )
38
39 def __init__(
40 self,
41 iterable=None,
42 disable=None,
43 level=logging.ERROR,
44 desc=None,
45 leave=False,
46 bar_format=None,
47 bytes=False, # pylint: disable=W0622
48 file=None,
49 total=None,
50 **kwargs
51 ):
52 """
53 bytes : shortcut for
54 `unit='B', unit_scale=True, unit_divisor=1024, miniters=1`
55 desc : persists after `close()`
56 level : effective logging level for determining `disable`;
57 used only if `disable` is unspecified
58 disable : If (default: None), will be determined by logging level.
59 May be overridden to `True` due to non-TTY status.
60 Skip override by specifying env var `DVC_IGNORE_ISATTY`.
61 kwargs : anything accepted by `tqdm.tqdm()`
62 """
63 kwargs = kwargs.copy()
64 if bytes:
65 kwargs = {**self.BYTES_DEFAULTS, **kwargs}
66 else:
67 kwargs.setdefault("unit_scale", total > 999 if total else True)
68 if file is None:
69 file = sys.stderr
70 self.desc_persist = desc
71 # auto-disable based on `logger.level`
72 if disable is None:
73 disable = logger.getEffectiveLevel() > level
74 # auto-disable based on TTY
75 if (
76 not disable
77 and not env2bool("DVC_IGNORE_ISATTY")
78 and hasattr(file, "isatty")
79 ):
80 disable = not file.isatty()
81 super().__init__(
82 iterable=iterable,
83 disable=disable,
84 leave=leave,
85 desc=desc,
86 bar_format="!",
87 lock_args=(False,),
88 total=total,
89 **kwargs
90 )
91 if bar_format is None:
92 if self.__len__():
93 self.bar_format = (
94 self.BAR_FMT_DEFAULT_NESTED
95 if self.pos
96 else self.BAR_FMT_DEFAULT
97 )
98 else:
99 self.bar_format = self.BAR_FMT_NOTOTAL
100 else:
101 self.bar_format = bar_format
102 self.refresh()
103
104 def update_desc(self, desc, n=1):
105 """
106 Calls `set_description_str(desc)` and `update(n)`
107 """
108 self.set_description_str(desc, refresh=False)
109 self.update(n)
110
111 def update_to(self, current, total=None):
112 if total:
113 self.total = total # pylint: disable=W0613,W0201
114 self.update(current - self.n)
115
116 def close(self):
117 if self.desc_persist is not None:
118 self.set_description_str(self.desc_persist, refresh=False)
119 # unknown/zero ETA
120 self.bar_format = self.bar_format.replace("<{remaining}", "")
121 # remove completed bar
122 self.bar_format = self.bar_format.replace("|{bar:10}|", " ")
123 super().close()
124
125 @property
126 def format_dict(self):
127 """inject `ncols_desc` to fill the display width (`ncols`)"""
128 d = super().format_dict
129 ncols = d["ncols"] or 80
130 ncols_desc = ncols - len(self.format_meter(ncols_desc=1, **d)) + 1
131 ncols_desc = max(ncols_desc, 0)
132 if ncols_desc:
133 d["ncols_desc"] = ncols_desc
134 else:
135 # work-around for zero-width description
136 d["ncols_desc"] = 1
137 d["prefix"] = ""
138 return d
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dvc/progress.py b/dvc/progress.py
--- a/dvc/progress.py
+++ b/dvc/progress.py
@@ -55,7 +55,8 @@
desc : persists after `close()`
level : effective logging level for determining `disable`;
used only if `disable` is unspecified
- disable : If (default: None), will be determined by logging level.
+ disable : If (default: None) or False,
+ will be determined by logging level.
May be overridden to `True` due to non-TTY status.
Skip override by specifying env var `DVC_IGNORE_ISATTY`.
kwargs : anything accepted by `tqdm.tqdm()`
@@ -69,7 +70,7 @@
file = sys.stderr
self.desc_persist = desc
# auto-disable based on `logger.level`
- if disable is None:
+ if not disable:
disable = logger.getEffectiveLevel() > level
# auto-disable based on TTY
if (
diff --git a/dvc/repo/add.py b/dvc/repo/add.py
--- a/dvc/repo/add.py
+++ b/dvc/repo/add.py
@@ -55,7 +55,7 @@
total=len(stages),
desc="Processing",
unit="file",
- disable=True if len(stages) == 1 else None,
+ disable=len(stages) == 1,
) as pbar_stages:
for stage in stages:
try:
@@ -102,7 +102,7 @@
for out in Tqdm(
targets,
desc="Creating DVC-files",
- disable=True if len(targets) < LARGE_DIR_SIZE else None,
+ disable=len(targets) < LARGE_DIR_SIZE,
unit="file",
):
stage = Stage.create(repo, outs=[out], add=True, fname=fname)
|
{"golden_diff": "diff --git a/dvc/progress.py b/dvc/progress.py\n--- a/dvc/progress.py\n+++ b/dvc/progress.py\n@@ -55,7 +55,8 @@\n desc : persists after `close()`\n level : effective logging level for determining `disable`;\n used only if `disable` is unspecified\n- disable : If (default: None), will be determined by logging level.\n+ disable : If (default: None) or False,\n+ will be determined by logging level.\n May be overridden to `True` due to non-TTY status.\n Skip override by specifying env var `DVC_IGNORE_ISATTY`.\n kwargs : anything accepted by `tqdm.tqdm()`\n@@ -69,7 +70,7 @@\n file = sys.stderr\n self.desc_persist = desc\n # auto-disable based on `logger.level`\n- if disable is None:\n+ if not disable:\n disable = logger.getEffectiveLevel() > level\n # auto-disable based on TTY\n if (\ndiff --git a/dvc/repo/add.py b/dvc/repo/add.py\n--- a/dvc/repo/add.py\n+++ b/dvc/repo/add.py\n@@ -55,7 +55,7 @@\n total=len(stages),\n desc=\"Processing\",\n unit=\"file\",\n- disable=True if len(stages) == 1 else None,\n+ disable=len(stages) == 1,\n ) as pbar_stages:\n for stage in stages:\n try:\n@@ -102,7 +102,7 @@\n for out in Tqdm(\n targets,\n desc=\"Creating DVC-files\",\n- disable=True if len(targets) < LARGE_DIR_SIZE else None,\n+ disable=len(targets) < LARGE_DIR_SIZE,\n unit=\"file\",\n ):\n stage = Stage.create(repo, outs=[out], add=True, fname=fname)\n", "issue": "dvc push -q is not completely quiet, shows file transfer tqdm progress bars\nHey guys, love what you've done with DVC.\r\n\r\nHad a quick bug that's causing me a little issue. When I use 'dvc push -q' I'm still seeing tqdm progress bars. Wouldn't be a huge issue, but I'm probably pushing 100K 250kb files. This is a local remote, so the transfer speeds are quick. I know in some of my other scripts where I use tqdm, if the iteration time is very small, the tqdm overhead of writing to std::out actually starts to contribute to performance.\r\n\r\ndvc version: 0.83.0\r\nos: Windows 10\r\n\r\n\r\n\r\n\r\n\ndvc push -q is not completely quiet, shows file transfer tqdm progress bars\nHey guys, love what you've done with DVC.\r\n\r\nHad a quick bug that's causing me a little issue. When I use 'dvc push -q' I'm still seeing tqdm progress bars. Wouldn't be a huge issue, but I'm probably pushing 100K 250kb files. This is a local remote, so the transfer speeds are quick. I know in some of my other scripts where I use tqdm, if the iteration time is very small, the tqdm overhead of writing to std::out actually starts to contribute to performance.\r\n\r\ndvc version: 0.83.0\r\nos: Windows 10\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import logging\nimport os\n\nimport colorama\n\nfrom . 
import locked\nfrom ..exceptions import (\n RecursiveAddingWhileUsingFilename,\n OverlappingOutputPathsError,\n)\nfrom ..output.base import OutputDoesNotExistError\nfrom ..progress import Tqdm\nfrom ..repo.scm_context import scm_context\nfrom ..stage import Stage\nfrom ..utils import LARGE_DIR_SIZE\n\nlogger = logging.getLogger(__name__)\n\n\n@locked\n@scm_context\ndef add(repo, targets, recursive=False, no_commit=False, fname=None):\n if recursive and fname:\n raise RecursiveAddingWhileUsingFilename()\n\n if isinstance(targets, str):\n targets = [targets]\n\n stages_list = []\n num_targets = len(targets)\n with Tqdm(total=num_targets, desc=\"Add\", unit=\"file\", leave=True) as pbar:\n if num_targets == 1:\n # clear unneeded top-level progress bar for single target\n pbar.bar_format = \"Adding...\"\n pbar.refresh()\n for target in targets:\n sub_targets = _find_all_targets(repo, target, recursive)\n pbar.total += len(sub_targets) - 1\n\n if os.path.isdir(target) and len(sub_targets) > LARGE_DIR_SIZE:\n logger.warning(\n \"You are adding a large directory '{target}' recursively,\"\n \" consider tracking it as a whole instead.\\n\"\n \"{purple}HINT:{nc} Remove the generated DVC-file and then\"\n \" run `{cyan}dvc add {target}{nc}`\".format(\n purple=colorama.Fore.MAGENTA,\n cyan=colorama.Fore.CYAN,\n nc=colorama.Style.RESET_ALL,\n target=target,\n )\n )\n\n stages = _create_stages(repo, sub_targets, fname, pbar=pbar)\n\n try:\n repo.check_modified_graph(stages)\n except OverlappingOutputPathsError as exc:\n msg = (\n \"Cannot add '{out}', because it is overlapping with other \"\n \"DVC tracked output: '{parent}'.\\n\"\n \"To include '{out}' in '{parent}', run \"\n \"'dvc commit {parent_stage}'\"\n ).format(\n out=exc.overlapping_out.path_info,\n parent=exc.parent.path_info,\n parent_stage=exc.parent.stage.relpath,\n )\n raise OverlappingOutputPathsError(\n exc.parent, exc.overlapping_out, msg\n )\n\n with Tqdm(\n total=len(stages),\n desc=\"Processing\",\n unit=\"file\",\n disable=True if len(stages) == 1 else None,\n ) as pbar_stages:\n for stage in stages:\n try:\n stage.save()\n except OutputDoesNotExistError:\n pbar.n -= 1\n raise\n\n if not no_commit:\n stage.commit()\n\n stage.dump()\n pbar_stages.update()\n\n stages_list += stages\n\n if num_targets == 1: # restore bar format for stats\n pbar.bar_format = pbar.BAR_FMT_DEFAULT\n\n return stages_list\n\n\ndef _find_all_targets(repo, target, recursive):\n if os.path.isdir(target) and recursive:\n return [\n fname\n for fname in Tqdm(\n repo.tree.walk_files(target),\n desc=\"Searching \" + target,\n bar_format=Tqdm.BAR_FMT_NOTOTAL,\n unit=\"file\",\n )\n if not repo.is_dvc_internal(fname)\n if not Stage.is_stage_file(fname)\n if not repo.scm.belongs_to_scm(fname)\n if not repo.scm.is_tracked(fname)\n ]\n return [target]\n\n\ndef _create_stages(repo, targets, fname, pbar=None):\n stages = []\n\n for out in Tqdm(\n targets,\n desc=\"Creating DVC-files\",\n disable=True if len(targets) < LARGE_DIR_SIZE else None,\n unit=\"file\",\n ):\n stage = Stage.create(\n repo, outs=[out], accompany_outs=True, fname=fname\n )\n\n if not stage:\n if pbar is not None:\n pbar.total -= 1\n continue\n\n stages.append(stage)\n if pbar is not None:\n pbar.update_desc(out)\n\n return stages\n", "path": "dvc/repo/add.py"}, {"content": "\"\"\"Manages progress bars for DVC repo.\"\"\"\n\nimport logging\nimport sys\nfrom threading import RLock\n\nfrom tqdm import tqdm\n\nfrom dvc.utils import env2bool\n\nlogger = 
logging.getLogger(__name__)\ntqdm.set_lock(RLock())\n\n\nclass Tqdm(tqdm):\n \"\"\"\n maximum-compatibility tqdm-based progressbars\n \"\"\"\n\n BAR_FMT_DEFAULT = (\n \"{percentage:3.0f}% {desc}|{bar}|\"\n \"{n_fmt}/{total_fmt}\"\n \" [{elapsed}<{remaining}, {rate_fmt:>11}{postfix}]\"\n )\n # nested bars should have fixed bar widths to align nicely\n BAR_FMT_DEFAULT_NESTED = (\n \"{percentage:3.0f}%|{bar:10}|{desc:{ncols_desc}.{ncols_desc}}\"\n \"{n_fmt}/{total_fmt}\"\n \" [{elapsed}<{remaining}, {rate_fmt:>11}{postfix}]\"\n )\n BAR_FMT_NOTOTAL = (\n \"{desc:{ncols_desc}.{ncols_desc}}{n_fmt}\"\n \" [{elapsed}, {rate_fmt:>11}{postfix}]\"\n )\n BYTES_DEFAULTS = dict(\n unit=\"B\", unit_scale=True, unit_divisor=1024, miniters=1\n )\n\n def __init__(\n self,\n iterable=None,\n disable=None,\n level=logging.ERROR,\n desc=None,\n leave=False,\n bar_format=None,\n bytes=False, # pylint: disable=W0622\n file=None,\n total=None,\n **kwargs\n ):\n \"\"\"\n bytes : shortcut for\n `unit='B', unit_scale=True, unit_divisor=1024, miniters=1`\n desc : persists after `close()`\n level : effective logging level for determining `disable`;\n used only if `disable` is unspecified\n disable : If (default: None), will be determined by logging level.\n May be overridden to `True` due to non-TTY status.\n Skip override by specifying env var `DVC_IGNORE_ISATTY`.\n kwargs : anything accepted by `tqdm.tqdm()`\n \"\"\"\n kwargs = kwargs.copy()\n if bytes:\n kwargs = {**self.BYTES_DEFAULTS, **kwargs}\n else:\n kwargs.setdefault(\"unit_scale\", total > 999 if total else True)\n if file is None:\n file = sys.stderr\n self.desc_persist = desc\n # auto-disable based on `logger.level`\n if disable is None:\n disable = logger.getEffectiveLevel() > level\n # auto-disable based on TTY\n if (\n not disable\n and not env2bool(\"DVC_IGNORE_ISATTY\")\n and hasattr(file, \"isatty\")\n ):\n disable = not file.isatty()\n super().__init__(\n iterable=iterable,\n disable=disable,\n leave=leave,\n desc=desc,\n bar_format=\"!\",\n lock_args=(False,),\n total=total,\n **kwargs\n )\n if bar_format is None:\n if self.__len__():\n self.bar_format = (\n self.BAR_FMT_DEFAULT_NESTED\n if self.pos\n else self.BAR_FMT_DEFAULT\n )\n else:\n self.bar_format = self.BAR_FMT_NOTOTAL\n else:\n self.bar_format = bar_format\n self.refresh()\n\n def update_desc(self, desc, n=1):\n \"\"\"\n Calls `set_description_str(desc)` and `update(n)`\n \"\"\"\n self.set_description_str(desc, refresh=False)\n self.update(n)\n\n def update_to(self, current, total=None):\n if total:\n self.total = total # pylint: disable=W0613,W0201\n self.update(current - self.n)\n\n def close(self):\n if self.desc_persist is not None:\n self.set_description_str(self.desc_persist, refresh=False)\n # unknown/zero ETA\n self.bar_format = self.bar_format.replace(\"<{remaining}\", \"\")\n # remove completed bar\n self.bar_format = self.bar_format.replace(\"|{bar:10}|\", \" \")\n super().close()\n\n @property\n def format_dict(self):\n \"\"\"inject `ncols_desc` to fill the display width (`ncols`)\"\"\"\n d = super().format_dict\n ncols = d[\"ncols\"] or 80\n ncols_desc = ncols - len(self.format_meter(ncols_desc=1, **d)) + 1\n ncols_desc = max(ncols_desc, 0)\n if ncols_desc:\n d[\"ncols_desc\"] = ncols_desc\n else:\n # work-around for zero-width description\n d[\"ncols_desc\"] = 1\n d[\"prefix\"] = \"\"\n return d\n", "path": "dvc/progress.py"}], "after_files": [{"content": "import logging\nimport os\n\nimport colorama\n\nfrom . 
import locked\nfrom ..exceptions import RecursiveAddingWhileUsingFilename\nfrom ..output.base import OutputDoesNotExistError\nfrom ..progress import Tqdm\nfrom ..repo.scm_context import scm_context\nfrom ..stage import Stage\nfrom ..utils import LARGE_DIR_SIZE\n\nlogger = logging.getLogger(__name__)\n\n\n@locked\n@scm_context\ndef add(repo, targets, recursive=False, no_commit=False, fname=None):\n if recursive and fname:\n raise RecursiveAddingWhileUsingFilename()\n\n if isinstance(targets, str):\n targets = [targets]\n\n stages_list = []\n num_targets = len(targets)\n with Tqdm(total=num_targets, desc=\"Add\", unit=\"file\", leave=True) as pbar:\n if num_targets == 1:\n # clear unneeded top-level progress bar for single target\n pbar.bar_format = \"Adding...\"\n pbar.refresh()\n for target in targets:\n sub_targets = _find_all_targets(repo, target, recursive)\n pbar.total += len(sub_targets) - 1\n\n if os.path.isdir(target) and len(sub_targets) > LARGE_DIR_SIZE:\n logger.warning(\n \"You are adding a large directory '{target}' recursively,\"\n \" consider tracking it as a whole instead.\\n\"\n \"{purple}HINT:{nc} Remove the generated DVC-file and then\"\n \" run `{cyan}dvc add {target}{nc}`\".format(\n purple=colorama.Fore.MAGENTA,\n cyan=colorama.Fore.CYAN,\n nc=colorama.Style.RESET_ALL,\n target=target,\n )\n )\n\n stages = _create_stages(repo, sub_targets, fname, pbar=pbar)\n\n repo.check_modified_graph(stages)\n\n with Tqdm(\n total=len(stages),\n desc=\"Processing\",\n unit=\"file\",\n disable=len(stages) == 1,\n ) as pbar_stages:\n for stage in stages:\n try:\n stage.save()\n except OutputDoesNotExistError:\n pbar.n -= 1\n raise\n\n if not no_commit:\n stage.commit()\n\n stage.dump()\n pbar_stages.update()\n\n stages_list += stages\n\n if num_targets == 1: # restore bar format for stats\n pbar.bar_format = pbar.BAR_FMT_DEFAULT\n\n return stages_list\n\n\ndef _find_all_targets(repo, target, recursive):\n if os.path.isdir(target) and recursive:\n return [\n fname\n for fname in Tqdm(\n repo.tree.walk_files(target),\n desc=\"Searching \" + target,\n bar_format=Tqdm.BAR_FMT_NOTOTAL,\n unit=\"file\",\n )\n if not repo.is_dvc_internal(fname)\n if not Stage.is_stage_file(fname)\n if not repo.scm.belongs_to_scm(fname)\n if not repo.scm.is_tracked(fname)\n ]\n return [target]\n\n\ndef _create_stages(repo, targets, fname, pbar=None):\n stages = []\n\n for out in Tqdm(\n targets,\n desc=\"Creating DVC-files\",\n disable=len(targets) < LARGE_DIR_SIZE,\n unit=\"file\",\n ):\n stage = Stage.create(repo, outs=[out], add=True, fname=fname)\n\n if not stage:\n if pbar is not None:\n pbar.total -= 1\n continue\n\n stages.append(stage)\n if pbar is not None:\n pbar.update_desc(out)\n\n return stages\n", "path": "dvc/repo/add.py"}, {"content": "\"\"\"Manages progress bars for DVC repo.\"\"\"\n\nimport logging\nimport sys\nfrom threading import RLock\n\nfrom tqdm import tqdm\n\nfrom dvc.utils import env2bool\n\nlogger = logging.getLogger(__name__)\ntqdm.set_lock(RLock())\n\n\nclass Tqdm(tqdm):\n \"\"\"\n maximum-compatibility tqdm-based progressbars\n \"\"\"\n\n BAR_FMT_DEFAULT = (\n \"{percentage:3.0f}% {desc}|{bar}|\"\n \"{n_fmt}/{total_fmt}\"\n \" [{elapsed}<{remaining}, {rate_fmt:>11}{postfix}]\"\n )\n # nested bars should have fixed bar widths to align nicely\n BAR_FMT_DEFAULT_NESTED = (\n \"{percentage:3.0f}%|{bar:10}|{desc:{ncols_desc}.{ncols_desc}}\"\n \"{n_fmt}/{total_fmt}\"\n \" [{elapsed}<{remaining}, {rate_fmt:>11}{postfix}]\"\n )\n BAR_FMT_NOTOTAL = (\n 
\"{desc:{ncols_desc}.{ncols_desc}}{n_fmt}\"\n \" [{elapsed}, {rate_fmt:>11}{postfix}]\"\n )\n BYTES_DEFAULTS = dict(\n unit=\"B\", unit_scale=True, unit_divisor=1024, miniters=1\n )\n\n def __init__(\n self,\n iterable=None,\n disable=None,\n level=logging.ERROR,\n desc=None,\n leave=False,\n bar_format=None,\n bytes=False, # pylint: disable=W0622\n file=None,\n total=None,\n **kwargs\n ):\n \"\"\"\n bytes : shortcut for\n `unit='B', unit_scale=True, unit_divisor=1024, miniters=1`\n desc : persists after `close()`\n level : effective logging level for determining `disable`;\n used only if `disable` is unspecified\n disable : If (default: None) or False,\n will be determined by logging level.\n May be overridden to `True` due to non-TTY status.\n Skip override by specifying env var `DVC_IGNORE_ISATTY`.\n kwargs : anything accepted by `tqdm.tqdm()`\n \"\"\"\n kwargs = kwargs.copy()\n if bytes:\n kwargs = {**self.BYTES_DEFAULTS, **kwargs}\n else:\n kwargs.setdefault(\"unit_scale\", total > 999 if total else True)\n if file is None:\n file = sys.stderr\n self.desc_persist = desc\n # auto-disable based on `logger.level`\n if not disable:\n disable = logger.getEffectiveLevel() > level\n # auto-disable based on TTY\n if (\n not disable\n and not env2bool(\"DVC_IGNORE_ISATTY\")\n and hasattr(file, \"isatty\")\n ):\n disable = not file.isatty()\n super().__init__(\n iterable=iterable,\n disable=disable,\n leave=leave,\n desc=desc,\n bar_format=\"!\",\n lock_args=(False,),\n total=total,\n **kwargs\n )\n if bar_format is None:\n if self.__len__():\n self.bar_format = (\n self.BAR_FMT_DEFAULT_NESTED\n if self.pos\n else self.BAR_FMT_DEFAULT\n )\n else:\n self.bar_format = self.BAR_FMT_NOTOTAL\n else:\n self.bar_format = bar_format\n self.refresh()\n\n def update_desc(self, desc, n=1):\n \"\"\"\n Calls `set_description_str(desc)` and `update(n)`\n \"\"\"\n self.set_description_str(desc, refresh=False)\n self.update(n)\n\n def update_to(self, current, total=None):\n if total:\n self.total = total # pylint: disable=W0613,W0201\n self.update(current - self.n)\n\n def close(self):\n if self.desc_persist is not None:\n self.set_description_str(self.desc_persist, refresh=False)\n # unknown/zero ETA\n self.bar_format = self.bar_format.replace(\"<{remaining}\", \"\")\n # remove completed bar\n self.bar_format = self.bar_format.replace(\"|{bar:10}|\", \" \")\n super().close()\n\n @property\n def format_dict(self):\n \"\"\"inject `ncols_desc` to fill the display width (`ncols`)\"\"\"\n d = super().format_dict\n ncols = d[\"ncols\"] or 80\n ncols_desc = ncols - len(self.format_meter(ncols_desc=1, **d)) + 1\n ncols_desc = max(ncols_desc, 0)\n if ncols_desc:\n d[\"ncols_desc\"] = ncols_desc\n else:\n # work-around for zero-width description\n d[\"ncols_desc\"] = 1\n d[\"prefix\"] = \"\"\n return d\n", "path": "dvc/progress.py"}]}
| 3,281 | 426 |
gh_patches_debug_11671
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-14461
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove the `clearcache` management command
### Proposed Changes
Remove the `clearcache` management command (from the `core` app), and omit it from the upgrade script.
### Justification
~This command was introduced back when we were experimenting with query caching, and is no longer needed.~ I was mistaken; it was actually introduced under #9122 to provide a mechanism for clearing the cached API spec. However, this is also no longer used since we moved to `drf-spectacular` (see #9608).
The Django cache is currently used only for discrete caching operations, including:
* Config revision tracking
* Recording the most recent release
* Caching RSS feed content (the RSSFeedWidget)
There has already been at least one bug related to this function (see #14182). Additionally, plugins may utilize the cache for other purposes, and we cannot make the assumption that it is safe to clear other cached data.
### Impact
Any mechanisms within NetBox or a plugin which employ caching will be responsible for their own cleanup, where applicable.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/core/management/commands/clearcache.py`
Content:
```
1 from django.core.cache import cache
2 from django.core.management.base import BaseCommand
3
4 from core.models import ConfigRevision
5
6
7 class Command(BaseCommand):
8 """Command to clear the entire cache."""
9 help = 'Clears the cache.'
10
11 def handle(self, *args, **kwargs):
12 # Fetch the current config revision from the cache
13 config_version = cache.get('config_version')
14 # Clear the cache
15 cache.clear()
16 self.stdout.write('Cache has been cleared.', ending="\n")
17 if config_version:
18 # Activate the current config revision
19 ConfigRevision.objects.get(id=config_version).activate()
20 self.stdout.write(f'Config revision ({config_version}) has been restored.', ending="\n")
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netbox/core/management/commands/clearcache.py b/netbox/core/management/commands/clearcache.py
deleted file mode 100644
--- a/netbox/core/management/commands/clearcache.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from django.core.cache import cache
-from django.core.management.base import BaseCommand
-
-from core.models import ConfigRevision
-
-
-class Command(BaseCommand):
- """Command to clear the entire cache."""
- help = 'Clears the cache.'
-
- def handle(self, *args, **kwargs):
- # Fetch the current config revision from the cache
- config_version = cache.get('config_version')
- # Clear the cache
- cache.clear()
- self.stdout.write('Cache has been cleared.', ending="\n")
- if config_version:
- # Activate the current config revision
- ConfigRevision.objects.get(id=config_version).activate()
- self.stdout.write(f'Config revision ({config_version}) has been restored.', ending="\n")
|
{"golden_diff": "diff --git a/netbox/core/management/commands/clearcache.py b/netbox/core/management/commands/clearcache.py\ndeleted file mode 100644\n--- a/netbox/core/management/commands/clearcache.py\n+++ /dev/null\n@@ -1,20 +0,0 @@\n-from django.core.cache import cache\n-from django.core.management.base import BaseCommand\n-\n-from core.models import ConfigRevision\n-\n-\n-class Command(BaseCommand):\n- \"\"\"Command to clear the entire cache.\"\"\"\n- help = 'Clears the cache.'\n-\n- def handle(self, *args, **kwargs):\n- # Fetch the current config revision from the cache\n- config_version = cache.get('config_version')\n- # Clear the cache\n- cache.clear()\n- self.stdout.write('Cache has been cleared.', ending=\"\\n\")\n- if config_version:\n- # Activate the current config revision\n- ConfigRevision.objects.get(id=config_version).activate()\n- self.stdout.write(f'Config revision ({config_version}) has been restored.', ending=\"\\n\")\n", "issue": "Remove the `clearcache` management command\n### Proposed Changes\r\n\r\nRemove the `clearcache` management command (from the `core` app), and omit it from the upgrade script.\r\n\r\n### Justification\r\n\r\n~This command was introduced back when we were experimenting with query caching, and is no longer needed.~ I was mistaken; it was actually introduced under #9122 to provide a mechanism for clearing the cached API spec. However, this is also no longer used since we moved to `drf-spectacular` (see #9608).\r\n\r\nThe Django cache is currently used only for discrete caching operations, including:\r\n\r\n* Config revision tracking\r\n* Recording the most recent release\r\n* Caching RSS feed content (the RSSFeedWidget)\r\n\r\nThere has already been at least one bug related to this function (see #14182). Additionally, plugins may utilize the cache for other purposes, and we cannot make the assumption that it is safe to clear other cached data.\r\n\r\n### Impact\r\n\r\nAny mechanisms within NetBox or a plugin which employ caching will be responsible for their own cleanup, where applicable.\n", "before_files": [{"content": "from django.core.cache import cache\nfrom django.core.management.base import BaseCommand\n\nfrom core.models import ConfigRevision\n\n\nclass Command(BaseCommand):\n \"\"\"Command to clear the entire cache.\"\"\"\n help = 'Clears the cache.'\n\n def handle(self, *args, **kwargs):\n # Fetch the current config revision from the cache\n config_version = cache.get('config_version')\n # Clear the cache\n cache.clear()\n self.stdout.write('Cache has been cleared.', ending=\"\\n\")\n if config_version:\n # Activate the current config revision\n ConfigRevision.objects.get(id=config_version).activate()\n self.stdout.write(f'Config revision ({config_version}) has been restored.', ending=\"\\n\")\n", "path": "netbox/core/management/commands/clearcache.py"}], "after_files": [{"content": null, "path": "netbox/core/management/commands/clearcache.py"}]}
| 674 | 231 |
gh_patches_debug_54036
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-3190
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 0.1.3
## 2023-08-16
```[tasklist]
### Tasks
- [x] Cut 0.1.3 release branch, freeze code
- [x] Update version number in all places in the new branch
- [x] Make an image from the branch with tag `0.1.3`, push to Dockerhub
- [x] Test installation with the new image
- [x] Test upgrade
- [x] Smoke testing application
- [x] Stability of the newly released items
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/__init__.py`
Content:
```
1 default_app_config = 'mathesar.apps.MathesarConfig'
2
3 __version__ = "0.1.2"
4
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mathesar/__init__.py b/mathesar/__init__.py
--- a/mathesar/__init__.py
+++ b/mathesar/__init__.py
@@ -1,3 +1,3 @@
default_app_config = 'mathesar.apps.MathesarConfig'
-__version__ = "0.1.2"
+__version__ = "0.1.3"
|
{"golden_diff": "diff --git a/mathesar/__init__.py b/mathesar/__init__.py\n--- a/mathesar/__init__.py\n+++ b/mathesar/__init__.py\n@@ -1,3 +1,3 @@\n default_app_config = 'mathesar.apps.MathesarConfig'\n \n-__version__ = \"0.1.2\"\n+__version__ = \"0.1.3\"\n", "issue": "Release 0.1.3\n## 2023-08-16\r\n```[tasklist]\r\n### Tasks\r\n- [x] Cut 0.1.3 release branch, freeze code\r\n- [x] Update version number in all places in the new branch\r\n- [x] Make an image from the branch with tag `0.1.3`, push to Dockerhub\r\n- [x] Test installation with the new image\r\n- [x] Test upgrade\r\n- [x] Smoke testing application\r\n- [x] Stability of the newly released items\r\n```\r\n\n", "before_files": [{"content": "default_app_config = 'mathesar.apps.MathesarConfig'\n\n__version__ = \"0.1.2\"\n", "path": "mathesar/__init__.py"}], "after_files": [{"content": "default_app_config = 'mathesar.apps.MathesarConfig'\n\n__version__ = \"0.1.3\"\n", "path": "mathesar/__init__.py"}]}
| 413 | 83 |
gh_patches_debug_10502
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-158
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add custom (meta) data on buckets and collections
For some use-cases, it might become useful to be able to store some custom attributes in buckets or collections (e.g. metadata like application version, contact email or whatever).
Currently both Collection and Bucket resources do not define extra fields in their schema, and Cliquet drops unknown fields if not explicitly allowed.
We can either:
- Allow unknown fields in collection and bucket schemas
- Add a specific root-level field (alongside `data` and `permissions`)
- Add a specific field (called `meta` for example) in the schema that could receive anything.
The advantage of the latter is that custom fields do not interfere with anything in the protocol, and are trivial to implement. The inconvenience is having to put `{data: {metadata: {email: "[email protected]"}}}` in the payload.
Thoughts ?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/views/collections.py`
Content:
```
1 import colander
2 import jsonschema
3 from cliquet import resource
4 from jsonschema import exceptions as jsonschema_exceptions
5
6 from kinto.views import NameGenerator, object_exists_or_404
7
8
9 class JSONSchemaMapping(colander.SchemaNode):
10 def schema_type(self, **kw):
11 return colander.Mapping(unknown='preserve')
12
13 def deserialize(self, cstruct=colander.null):
14 # Start by deserializing a simple mapping.
15 validated = super(JSONSchemaMapping, self).deserialize(cstruct)
16
17 # In case it is optional in parent schema.
18 if not validated or validated in (colander.null, colander.drop):
19 return validated
20
21 try:
22 jsonschema.Draft4Validator.check_schema(validated)
23 except jsonschema_exceptions.SchemaError as e:
24 self.raise_invalid(e.path.pop() + e.message)
25 return validated
26
27
28 class CollectionSchema(resource.ResourceSchema):
29 schema = JSONSchemaMapping(missing=colander.drop)
30
31
32 @resource.register(name='collection',
33 collection_methods=('GET',),
34 collection_path='/buckets/{{bucket_id}}/collections',
35 record_path='/buckets/{{bucket_id}}/collections/{{id}}')
36 class Collection(resource.ProtectedResource):
37 mapping = CollectionSchema()
38 permissions = ('read', 'write', 'record:create')
39
40 def __init__(self, *args, **kwargs):
41 super(Collection, self).__init__(*args, **kwargs)
42
43 bucket_id = self.request.matchdict['bucket_id']
44 object_exists_or_404(self.request,
45 collection_id='bucket',
46 object_id=bucket_id)
47
48 self.collection.id_generator = NameGenerator()
49
50 def get_parent_id(self, request):
51 bucket_id = request.matchdict['bucket_id']
52 parent_id = '/buckets/%s' % bucket_id
53 return parent_id
54
55 def delete(self):
56 result = super(Collection, self).delete()
57
58 # Delete records.
59 storage = self.collection.storage
60 parent_id = '%s/collections/%s' % (self.collection.parent_id,
61 self.record_id)
62 storage.delete_all(collection_id='record',
63 parent_id=parent_id,
64 with_deleted=False)
65 storage.purge_deleted(collection_id='record', parent_id=parent_id)
66
67 return result
68
```
Path: `kinto/views/records.py`
Content:
```
1 import jsonschema
2 from cliquet import resource, schema
3 from cliquet.errors import raise_invalid
4 from jsonschema import exceptions as jsonschema_exceptions
5
6 from kinto.views import object_exists_or_404
7
8
9 class RecordSchema(schema.ResourceSchema):
10 class Options():
11 preserve_unknown = True
12
13
14 _parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'
15
16
17 @resource.register(name='record',
18 collection_path=_parent_path + '/records',
19 record_path=_parent_path + '/records/{{id}}')
20 class Record(resource.ProtectedResource):
21
22 mapping = RecordSchema()
23 schema_field = 'schema'
24
25 def __init__(self, *args, **kwargs):
26 super(Record, self).__init__(*args, **kwargs)
27
28 bucket_id = self.request.matchdict['bucket_id']
29 object_exists_or_404(self.request,
30 collection_id='bucket',
31 object_id=bucket_id)
32
33 collection_id = self.request.matchdict['collection_id']
34 parent_id = '/buckets/%s' % bucket_id
35 self._collection = object_exists_or_404(self.request,
36 collection_id='collection',
37 parent_id=parent_id,
38 object_id=collection_id)
39
40 def get_parent_id(self, request):
41 bucket_id = request.matchdict['bucket_id']
42 collection_id = request.matchdict['collection_id']
43 return '/buckets/%s/collections/%s' % (bucket_id, collection_id)
44
45 def is_known_field(self, field_name):
46 """Without schema, any field is considered as known."""
47 return True
48
49 def process_record(self, new, old=None):
50 """Validate records against collection schema, if any."""
51 schema = self._collection.get('schema')
52 if not schema:
53 return new
54
55 collection_timestamp = self._collection[self.collection.modified_field]
56
57 try:
58 jsonschema.validate(new, schema)
59 new[self.schema_field] = collection_timestamp
60 except jsonschema_exceptions.ValidationError as e:
61 field = e.path.pop() if e.path else e.validator_value.pop()
62 raise_invalid(self.request, name=field, description=e.message)
63
64 return new
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kinto/views/collections.py b/kinto/views/collections.py
--- a/kinto/views/collections.py
+++ b/kinto/views/collections.py
@@ -28,6 +28,9 @@
class CollectionSchema(resource.ResourceSchema):
schema = JSONSchemaMapping(missing=colander.drop)
+ class Options:
+ preserve_unknown = True
+
@resource.register(name='collection',
collection_methods=('GET',),
diff --git a/kinto/views/records.py b/kinto/views/records.py
--- a/kinto/views/records.py
+++ b/kinto/views/records.py
@@ -7,7 +7,7 @@
class RecordSchema(schema.ResourceSchema):
- class Options():
+ class Options:
preserve_unknown = True
|
{"golden_diff": "diff --git a/kinto/views/collections.py b/kinto/views/collections.py\n--- a/kinto/views/collections.py\n+++ b/kinto/views/collections.py\n@@ -28,6 +28,9 @@\n class CollectionSchema(resource.ResourceSchema):\n schema = JSONSchemaMapping(missing=colander.drop)\n \n+ class Options:\n+ preserve_unknown = True\n+\n \n @resource.register(name='collection',\n collection_methods=('GET',),\ndiff --git a/kinto/views/records.py b/kinto/views/records.py\n--- a/kinto/views/records.py\n+++ b/kinto/views/records.py\n@@ -7,7 +7,7 @@\n \n \n class RecordSchema(schema.ResourceSchema):\n- class Options():\n+ class Options:\n preserve_unknown = True\n", "issue": "Add custom (meta) data on buckets and collections \nFor some use-cases, it might become useful to be able to store some custom attributes in buckets or collections (e.g. metadata like application version, contact email or whatever).\n\nCurrently both Collection and Bucket resources do not define extra fields in their schema, and Cliquet drops unknown fields if not explicitly allowed.\n\nWe can either:\n- Allow unknown fields in collection and buckets schemas\n- Add a specific root level field (along `data` and `permissions`)\n- Add a specific field (called `meta` for example) in the schema that could receive anything.\n\nThe advantage of the latter is that custom fields do not interfere with anything in the protocol, and are trivial to implement. The inconvenient is having to put `{data: {metadata: {email: \"[email protected]\"}}` in the payload.\n\nThoughts ?\n\n", "before_files": [{"content": "import colander\nimport jsonschema\nfrom cliquet import resource\nfrom jsonschema import exceptions as jsonschema_exceptions\n\nfrom kinto.views import NameGenerator, object_exists_or_404\n\n\nclass JSONSchemaMapping(colander.SchemaNode):\n def schema_type(self, **kw):\n return colander.Mapping(unknown='preserve')\n\n def deserialize(self, cstruct=colander.null):\n # Start by deserializing a simple mapping.\n validated = super(JSONSchemaMapping, self).deserialize(cstruct)\n\n # In case it is optional in parent schema.\n if not validated or validated in (colander.null, colander.drop):\n return validated\n\n try:\n jsonschema.Draft4Validator.check_schema(validated)\n except jsonschema_exceptions.SchemaError as e:\n self.raise_invalid(e.path.pop() + e.message)\n return validated\n\n\nclass CollectionSchema(resource.ResourceSchema):\n schema = JSONSchemaMapping(missing=colander.drop)\n\n\[email protected](name='collection',\n collection_methods=('GET',),\n collection_path='/buckets/{{bucket_id}}/collections',\n record_path='/buckets/{{bucket_id}}/collections/{{id}}')\nclass Collection(resource.ProtectedResource):\n mapping = CollectionSchema()\n permissions = ('read', 'write', 'record:create')\n\n def __init__(self, *args, **kwargs):\n super(Collection, self).__init__(*args, **kwargs)\n\n bucket_id = self.request.matchdict['bucket_id']\n object_exists_or_404(self.request,\n collection_id='bucket',\n object_id=bucket_id)\n\n self.collection.id_generator = NameGenerator()\n\n def get_parent_id(self, request):\n bucket_id = request.matchdict['bucket_id']\n parent_id = '/buckets/%s' % bucket_id\n return parent_id\n\n def delete(self):\n result = super(Collection, self).delete()\n\n # Delete records.\n storage = self.collection.storage\n parent_id = '%s/collections/%s' % (self.collection.parent_id,\n self.record_id)\n storage.delete_all(collection_id='record',\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id='record', 
parent_id=parent_id)\n\n return result\n", "path": "kinto/views/collections.py"}, {"content": "import jsonschema\nfrom cliquet import resource, schema\nfrom cliquet.errors import raise_invalid\nfrom jsonschema import exceptions as jsonschema_exceptions\n\nfrom kinto.views import object_exists_or_404\n\n\nclass RecordSchema(schema.ResourceSchema):\n class Options():\n preserve_unknown = True\n\n\n_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'\n\n\[email protected](name='record',\n collection_path=_parent_path + '/records',\n record_path=_parent_path + '/records/{{id}}')\nclass Record(resource.ProtectedResource):\n\n mapping = RecordSchema()\n schema_field = 'schema'\n\n def __init__(self, *args, **kwargs):\n super(Record, self).__init__(*args, **kwargs)\n\n bucket_id = self.request.matchdict['bucket_id']\n object_exists_or_404(self.request,\n collection_id='bucket',\n object_id=bucket_id)\n\n collection_id = self.request.matchdict['collection_id']\n parent_id = '/buckets/%s' % bucket_id\n self._collection = object_exists_or_404(self.request,\n collection_id='collection',\n parent_id=parent_id,\n object_id=collection_id)\n\n def get_parent_id(self, request):\n bucket_id = request.matchdict['bucket_id']\n collection_id = request.matchdict['collection_id']\n return '/buckets/%s/collections/%s' % (bucket_id, collection_id)\n\n def is_known_field(self, field_name):\n \"\"\"Without schema, any field is considered as known.\"\"\"\n return True\n\n def process_record(self, new, old=None):\n \"\"\"Validate records against collection schema, if any.\"\"\"\n schema = self._collection.get('schema')\n if not schema:\n return new\n\n collection_timestamp = self._collection[self.collection.modified_field]\n\n try:\n jsonschema.validate(new, schema)\n new[self.schema_field] = collection_timestamp\n except jsonschema_exceptions.ValidationError as e:\n field = e.path.pop() if e.path else e.validator_value.pop()\n raise_invalid(self.request, name=field, description=e.message)\n\n return new\n", "path": "kinto/views/records.py"}], "after_files": [{"content": "import colander\nimport jsonschema\nfrom cliquet import resource\nfrom jsonschema import exceptions as jsonschema_exceptions\n\nfrom kinto.views import NameGenerator, object_exists_or_404\n\n\nclass JSONSchemaMapping(colander.SchemaNode):\n def schema_type(self, **kw):\n return colander.Mapping(unknown='preserve')\n\n def deserialize(self, cstruct=colander.null):\n # Start by deserializing a simple mapping.\n validated = super(JSONSchemaMapping, self).deserialize(cstruct)\n\n # In case it is optional in parent schema.\n if not validated or validated in (colander.null, colander.drop):\n return validated\n\n try:\n jsonschema.Draft4Validator.check_schema(validated)\n except jsonschema_exceptions.SchemaError as e:\n self.raise_invalid(e.path.pop() + e.message)\n return validated\n\n\nclass CollectionSchema(resource.ResourceSchema):\n schema = JSONSchemaMapping(missing=colander.drop)\n\n class Options:\n preserve_unknown = True\n\n\[email protected](name='collection',\n collection_methods=('GET',),\n collection_path='/buckets/{{bucket_id}}/collections',\n record_path='/buckets/{{bucket_id}}/collections/{{id}}')\nclass Collection(resource.ProtectedResource):\n mapping = CollectionSchema()\n permissions = ('read', 'write', 'record:create')\n\n def __init__(self, *args, **kwargs):\n super(Collection, self).__init__(*args, **kwargs)\n\n bucket_id = self.request.matchdict['bucket_id']\n object_exists_or_404(self.request,\n 
collection_id='bucket',\n object_id=bucket_id)\n\n self.collection.id_generator = NameGenerator()\n\n def get_parent_id(self, request):\n bucket_id = request.matchdict['bucket_id']\n parent_id = '/buckets/%s' % bucket_id\n return parent_id\n\n def delete(self):\n result = super(Collection, self).delete()\n\n # Delete records.\n storage = self.collection.storage\n parent_id = '%s/collections/%s' % (self.collection.parent_id,\n self.record_id)\n storage.delete_all(collection_id='record',\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id='record', parent_id=parent_id)\n\n return result\n", "path": "kinto/views/collections.py"}, {"content": "import jsonschema\nfrom cliquet import resource, schema\nfrom cliquet.errors import raise_invalid\nfrom jsonschema import exceptions as jsonschema_exceptions\n\nfrom kinto.views import object_exists_or_404\n\n\nclass RecordSchema(schema.ResourceSchema):\n class Options:\n preserve_unknown = True\n\n\n_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'\n\n\[email protected](name='record',\n collection_path=_parent_path + '/records',\n record_path=_parent_path + '/records/{{id}}')\nclass Record(resource.ProtectedResource):\n\n mapping = RecordSchema()\n schema_field = 'schema'\n\n def __init__(self, *args, **kwargs):\n super(Record, self).__init__(*args, **kwargs)\n\n bucket_id = self.request.matchdict['bucket_id']\n object_exists_or_404(self.request,\n collection_id='bucket',\n object_id=bucket_id)\n\n collection_id = self.request.matchdict['collection_id']\n parent_id = '/buckets/%s' % bucket_id\n self._collection = object_exists_or_404(self.request,\n collection_id='collection',\n parent_id=parent_id,\n object_id=collection_id)\n\n def get_parent_id(self, request):\n bucket_id = request.matchdict['bucket_id']\n collection_id = request.matchdict['collection_id']\n return '/buckets/%s/collections/%s' % (bucket_id, collection_id)\n\n def is_known_field(self, field_name):\n \"\"\"Without schema, any field is considered as known.\"\"\"\n return True\n\n def process_record(self, new, old=None):\n \"\"\"Validate records against collection schema, if any.\"\"\"\n schema = self._collection.get('schema')\n if not schema:\n return new\n\n collection_timestamp = self._collection[self.collection.modified_field]\n\n try:\n jsonschema.validate(new, schema)\n new[self.schema_field] = collection_timestamp\n except jsonschema_exceptions.ValidationError as e:\n field = e.path.pop() if e.path else e.validator_value.pop()\n raise_invalid(self.request, name=field, description=e.message)\n\n return new\n", "path": "kinto/views/records.py"}]}
| 1,645 | 167 |
gh_patches_debug_5423
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-18290
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
linear
#15051
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/nn/functional/common.py`
Content:
```
1 # local
2 import ivy
3 from ivy.func_wrapper import with_supported_dtypes
4 from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
5
6
7 @to_ivy_arrays_and_back
8 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
9 def cosine_similarity(x1, x2, *, axis=1, eps=1e-08):
10 if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:
11 numerator = ivy.sum(x1 * x2, axis=axis)
12 x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)
13 x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)
14 else:
15 numerator = ivy.sum(x1 * x2)
16 x1_squared_norm = ivy.sum(ivy.square(x1))
17 x2_squared_norm = ivy.sum(ivy.square(x2))
18
19 x1_norm = ivy.sqrt(x1_squared_norm)
20 x2_norm = ivy.sqrt(x2_squared_norm)
21 norm_mm = x1_norm * x2_norm
22 denominator = ivy.maximum(norm_mm, eps)
23
24 cosine = numerator / denominator
25 return cosine
26
27
28 @to_ivy_arrays_and_back
29 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
30 def dropout2d(x, *, p=0.5, training=True, data_format="NCHW", name=None):
31 return ivy.dropout2d(x, p=p, training=training, data_format=data_format)
32
33
34 def get_mask(shape, device, prob, seed=None):
35 mask = ivy.where(
36 ivy.random_uniform(shape=shape, device=device, seed=seed) < prob,
37 0.0,
38 1.0,
39 )
40 return mask
41
42
43 @to_ivy_arrays_and_back
44 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
45 def dropout(x, p=0.5, axis=None, training=True, mode="upscale_in_train", name=None):
46 if axis > 1:
47 raise ValueError("Axis value can only be 0 or 1 or None.")
48 elif axis is None or (isinstance(axis, list) and len(axis) == 2):
49 mask = get_mask(shape=x.shape, device=ivy.dev(x), prob=p, seed=None)
50 elif axis == 0:
51 mask = get_mask(shape=(x.shape[0], 1), device=ivy.dev(x), prob=p)
52 mask = ivy.broadcast_to(mask, x.shape)
53 elif axis == 1:
54 mask = get_mask(shape=(1, x.shape[1]), device=ivy.dev(x), prob=p)
55 mask = ivy.broadcast_to(mask, x.shape)
56 if mode == "upscale_in_train":
57 if training:
58 out = ivy.multiply(x, mask)
59 ret = ivy.multiply(out, 1.0 / (1.0 - p))
60 else:
61 ret = x
62 else:
63 if training:
64 ret = ivy.multiply(x, mask)
65 else:
66 ret = ivy.multiply(x, (1.0 - p))
67 return ret
68
69
70 @to_ivy_arrays_and_back
71 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
72 def zeropad2d(x, padding, data_format="NCHW", name=None):
73 if ivy.is_array(padding):
74 padding = padding.to_list()
75 if isinstance(padding, int):
76 padding = [padding, padding, padding, padding]
77 if len(padding) != 4:
78 raise ValueError("Padding length should be 4.")
79 if x.ndim != 4:
80 raise ValueError("Input x must be 4-dimensional.")
81 if data_format == "NCHW":
82 padding = ((0, 0), (0, 0), (padding[2], padding[3]), (padding[0], padding[1]))
83 elif data_format == "NHWC":
84 padding = ((0, 0), (padding[2], padding[3]), (padding[0], padding[1]), (0, 0))
85 else:
86 raise ValueError("Unknown data_format: {}".format(data_format))
87 return ivy.pad(x, padding, mode="constant", constant_values=0.0)
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ivy/functional/frontends/paddle/nn/functional/common.py b/ivy/functional/frontends/paddle/nn/functional/common.py
--- a/ivy/functional/frontends/paddle/nn/functional/common.py
+++ b/ivy/functional/frontends/paddle/nn/functional/common.py
@@ -85,3 +85,10 @@
else:
raise ValueError("Unknown data_format: {}".format(data_format))
return ivy.pad(x, padding, mode="constant", constant_values=0.0)
+
+
+@to_ivy_arrays_and_back
+@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
+def linear(x, weight, bias=None, name=None):
+ weight = ivy.swapaxes(weight, -1, -2)
+ return ivy.linear(x, weight, bias=bias)
|
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/nn/functional/common.py b/ivy/functional/frontends/paddle/nn/functional/common.py\n--- a/ivy/functional/frontends/paddle/nn/functional/common.py\n+++ b/ivy/functional/frontends/paddle/nn/functional/common.py\n@@ -85,3 +85,10 @@\n else:\n raise ValueError(\"Unknown data_format: {}\".format(data_format))\n return ivy.pad(x, padding, mode=\"constant\", constant_values=0.0)\n+\n+\n+@to_ivy_arrays_and_back\n+@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n+def linear(x, weight, bias=None, name=None):\n+ weight = ivy.swapaxes(weight, -1, -2)\n+ return ivy.linear(x, weight, bias=bias)\n", "issue": "linear\n#15051 \n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef cosine_similarity(x1, x2, *, axis=1, eps=1e-08):\n if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:\n numerator = ivy.sum(x1 * x2, axis=axis)\n x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)\n x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)\n else:\n numerator = ivy.sum(x1 * x2)\n x1_squared_norm = ivy.sum(ivy.square(x1))\n x2_squared_norm = ivy.sum(ivy.square(x2))\n\n x1_norm = ivy.sqrt(x1_squared_norm)\n x2_norm = ivy.sqrt(x2_squared_norm)\n norm_mm = x1_norm * x2_norm\n denominator = ivy.maximum(norm_mm, eps)\n\n cosine = numerator / denominator\n return cosine\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef dropout2d(x, *, p=0.5, training=True, data_format=\"NCHW\", name=None):\n return ivy.dropout2d(x, p=p, training=training, data_format=data_format)\n\n\ndef get_mask(shape, device, prob, seed=None):\n mask = ivy.where(\n ivy.random_uniform(shape=shape, device=device, seed=seed) < prob,\n 0.0,\n 1.0,\n )\n return mask\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef dropout(x, p=0.5, axis=None, training=True, mode=\"upscale_in_train\", name=None):\n if axis > 1:\n raise ValueError(\"Axis value can only be 0 or 1 or None.\")\n elif axis is None or (isinstance(axis, list) and len(axis) == 2):\n mask = get_mask(shape=x.shape, device=ivy.dev(x), prob=p, seed=None)\n elif axis == 0:\n mask = get_mask(shape=(x.shape[0], 1), device=ivy.dev(x), prob=p)\n mask = ivy.broadcast_to(mask, x.shape)\n elif axis == 1:\n mask = get_mask(shape=(1, x.shape[1]), device=ivy.dev(x), prob=p)\n mask = ivy.broadcast_to(mask, x.shape)\n if mode == \"upscale_in_train\":\n if training:\n out = ivy.multiply(x, mask)\n ret = ivy.multiply(out, 1.0 / (1.0 - p))\n else:\n ret = x\n else:\n if training:\n ret = ivy.multiply(x, mask)\n else:\n ret = ivy.multiply(x, (1.0 - p))\n return ret\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef zeropad2d(x, padding, data_format=\"NCHW\", name=None):\n if ivy.is_array(padding):\n padding = padding.to_list()\n if isinstance(padding, int):\n padding = [padding, padding, padding, padding]\n if len(padding) != 4:\n raise ValueError(\"Padding length should be 4.\")\n if x.ndim != 4:\n raise ValueError(\"Input x must be 4-dimensional.\")\n if data_format == \"NCHW\":\n padding = ((0, 0), (0, 0), (padding[2], padding[3]), (padding[0], 
padding[1]))\n elif data_format == \"NHWC\":\n padding = ((0, 0), (padding[2], padding[3]), (padding[0], padding[1]), (0, 0))\n else:\n raise ValueError(\"Unknown data_format: {}\".format(data_format))\n return ivy.pad(x, padding, mode=\"constant\", constant_values=0.0)\n", "path": "ivy/functional/frontends/paddle/nn/functional/common.py"}], "after_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef cosine_similarity(x1, x2, *, axis=1, eps=1e-08):\n if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:\n numerator = ivy.sum(x1 * x2, axis=axis)\n x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)\n x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)\n else:\n numerator = ivy.sum(x1 * x2)\n x1_squared_norm = ivy.sum(ivy.square(x1))\n x2_squared_norm = ivy.sum(ivy.square(x2))\n\n x1_norm = ivy.sqrt(x1_squared_norm)\n x2_norm = ivy.sqrt(x2_squared_norm)\n norm_mm = x1_norm * x2_norm\n denominator = ivy.maximum(norm_mm, eps)\n\n cosine = numerator / denominator\n return cosine\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef dropout2d(x, *, p=0.5, training=True, data_format=\"NCHW\", name=None):\n return ivy.dropout2d(x, p=p, training=training, data_format=data_format)\n\n\ndef get_mask(shape, device, prob, seed=None):\n mask = ivy.where(\n ivy.random_uniform(shape=shape, device=device, seed=seed) < prob,\n 0.0,\n 1.0,\n )\n return mask\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef dropout(x, p=0.5, axis=None, training=True, mode=\"upscale_in_train\", name=None):\n if axis > 1:\n raise ValueError(\"Axis value can only be 0 or 1 or None.\")\n elif axis is None or (isinstance(axis, list) and len(axis) == 2):\n mask = get_mask(shape=x.shape, device=ivy.dev(x), prob=p, seed=None)\n elif axis == 0:\n mask = get_mask(shape=(x.shape[0], 1), device=ivy.dev(x), prob=p)\n mask = ivy.broadcast_to(mask, x.shape)\n elif axis == 1:\n mask = get_mask(shape=(1, x.shape[1]), device=ivy.dev(x), prob=p)\n mask = ivy.broadcast_to(mask, x.shape)\n if mode == \"upscale_in_train\":\n if training:\n out = ivy.multiply(x, mask)\n ret = ivy.multiply(out, 1.0 / (1.0 - p))\n else:\n ret = x\n else:\n if training:\n ret = ivy.multiply(x, mask)\n else:\n ret = ivy.multiply(x, (1.0 - p))\n return ret\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef zeropad2d(x, padding, data_format=\"NCHW\", name=None):\n if ivy.is_array(padding):\n padding = padding.to_list()\n if isinstance(padding, int):\n padding = [padding, padding, padding, padding]\n if len(padding) != 4:\n raise ValueError(\"Padding length should be 4.\")\n if x.ndim != 4:\n raise ValueError(\"Input x must be 4-dimensional.\")\n if data_format == \"NCHW\":\n padding = ((0, 0), (0, 0), (padding[2], padding[3]), (padding[0], padding[1]))\n elif data_format == \"NHWC\":\n padding = ((0, 0), (padding[2], padding[3]), (padding[0], padding[1]), (0, 0))\n else:\n raise ValueError(\"Unknown data_format: {}\".format(data_format))\n return ivy.pad(x, padding, mode=\"constant\", constant_values=0.0)\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, 
\"paddle\")\ndef linear(x, weight, bias=None, name=None):\n weight = ivy.swapaxes(weight, -1, -2)\n return ivy.linear(x, weight, bias=bias)\n", "path": "ivy/functional/frontends/paddle/nn/functional/common.py"}]}
| 1,429 | 201 |
gh_patches_debug_2077
|
rasdani/github-patches
|
git_diff
|
mitmproxy__mitmproxy-2615
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[requires.io] dependency update on master branch
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 import runpy
3 from codecs import open
4
5 from setuptools import setup, find_packages
6
7 # Based on https://github.com/pypa/sampleproject/blob/master/setup.py
8 # and https://python-packaging-user-guide.readthedocs.org/
9
10 here = os.path.abspath(os.path.dirname(__file__))
11
12 with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
13 long_description = f.read()
14
15 VERSION = runpy.run_path(os.path.join(here, "mitmproxy", "version.py"))["VERSION"]
16
17 setup(
18 name="mitmproxy",
19 version=VERSION,
20 description="An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.",
21 long_description=long_description,
22 url="http://mitmproxy.org",
23 author="Aldo Cortesi",
24 author_email="[email protected]",
25 license="MIT",
26 classifiers=[
27 "License :: OSI Approved :: MIT License",
28 "Development Status :: 5 - Production/Stable",
29 "Environment :: Console",
30 "Environment :: Console :: Curses",
31 "Operating System :: MacOS :: MacOS X",
32 "Operating System :: POSIX",
33 "Operating System :: Microsoft :: Windows",
34 "Programming Language :: Python",
35 "Programming Language :: Python :: 3",
36 "Programming Language :: Python :: 3 :: Only",
37 "Programming Language :: Python :: 3.5",
38 "Programming Language :: Python :: 3.6",
39 "Programming Language :: Python :: Implementation :: CPython",
40 "Topic :: Security",
41 "Topic :: Internet",
42 "Topic :: Internet :: WWW/HTTP",
43 "Topic :: Internet :: Proxy Servers",
44 "Topic :: Software Development :: Testing"
45 ],
46 packages=find_packages(include=[
47 "mitmproxy", "mitmproxy.*",
48 "pathod", "pathod.*",
49 ]),
50 include_package_data=True,
51 entry_points={
52 'console_scripts': [
53 "mitmproxy = mitmproxy.tools.main:mitmproxy",
54 "mitmdump = mitmproxy.tools.main:mitmdump",
55 "mitmweb = mitmproxy.tools.main:mitmweb",
56 "pathod = pathod.pathod_cmdline:go_pathod",
57 "pathoc = pathod.pathoc_cmdline:go_pathoc"
58 ]
59 },
60 # https://packaging.python.org/en/latest/requirements/#install-requires
61 # It is not considered best practice to use install_requires to pin dependencies to specific versions.
62 install_requires=[
63 "blinker>=1.4, <1.5",
64 "brotlipy>=0.5.1, <0.8",
65 "certifi>=2015.11.20.1", # no semver here - this should always be on the last release!
66 "click>=6.2, <7",
67 "cryptography>=2.0,<2.2",
68 "h2>=3.0, <4",
69 "hyperframe>=5.0, <6",
70 "kaitaistruct>=0.7, <0.8",
71 "ldap3>=2.2.0, <2.4",
72 "passlib>=1.6.5, <1.8",
73 "pyasn1>=0.3.1, <0.4",
74 "pyOpenSSL>=17.2,<17.4",
75 "pyparsing>=2.1.3, <2.3",
76 "pyperclip>=1.5.22, <1.6",
77 "requests>=2.9.1, <3",
78 "ruamel.yaml>=0.13.2, <0.16",
79 "sortedcontainers>=1.5.4, <1.6",
80 "tornado>=4.3, <4.6",
81 "urwid>=1.3.1, <1.4",
82 ],
83 extras_require={
84 ':sys_platform == "win32"': [
85 "pydivert>=2.0.3,<2.2",
86 ],
87 'dev': [
88 "flake8>=3.2.1, <3.5",
89 "Flask>=0.10.1, <0.13",
90 "mypy>=0.530,<0.541",
91 "pytest-cov>=2.2.1, <3",
92 "pytest-faulthandler>=1.3.0, <2",
93 "pytest-timeout>=1.0.0, <2",
94 "pytest-xdist>=1.14, <2",
95 "pytest>=3.1, <4",
96 "rstcheck>=2.2, <4.0",
97 "sphinx_rtd_theme>=0.1.9, <0.3",
98 "sphinx-autobuild>=0.5.2, <0.8",
99 "sphinx>=1.3.5, <1.7",
100 "sphinxcontrib-documentedlist>=0.5.0, <0.7",
101 "tox>=2.3, <3",
102 ],
103 'examples': [
104 "beautifulsoup4>=4.4.1, <4.7",
105 "Pillow>=4.3,<4.4",
106 ]
107 }
108 )
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -85,7 +85,7 @@
"pydivert>=2.0.3,<2.2",
],
'dev': [
- "flake8>=3.2.1, <3.5",
+ "flake8>=3.5, <3.6",
"Flask>=0.10.1, <0.13",
"mypy>=0.530,<0.541",
"pytest-cov>=2.2.1, <3",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -85,7 +85,7 @@\n \"pydivert>=2.0.3,<2.2\",\n ],\n 'dev': [\n- \"flake8>=3.2.1, <3.5\",\n+ \"flake8>=3.5, <3.6\",\n \"Flask>=0.10.1, <0.13\",\n \"mypy>=0.530,<0.541\",\n \"pytest-cov>=2.2.1, <3\",\n", "issue": "[requires.io] dependency update on master branch\n\n", "before_files": [{"content": "import os\nimport runpy\nfrom codecs import open\n\nfrom setuptools import setup, find_packages\n\n# Based on https://github.com/pypa/sampleproject/blob/master/setup.py\n# and https://python-packaging-user-guide.readthedocs.org/\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nVERSION = runpy.run_path(os.path.join(here, \"mitmproxy\", \"version.py\"))[\"VERSION\"]\n\nsetup(\n name=\"mitmproxy\",\n version=VERSION,\n description=\"An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.\",\n long_description=long_description,\n url=\"http://mitmproxy.org\",\n author=\"Aldo Cortesi\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Environment :: Console :: Curses\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Security\",\n \"Topic :: Internet\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: Proxy Servers\",\n \"Topic :: Software Development :: Testing\"\n ],\n packages=find_packages(include=[\n \"mitmproxy\", \"mitmproxy.*\",\n \"pathod\", \"pathod.*\",\n ]),\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n \"mitmproxy = mitmproxy.tools.main:mitmproxy\",\n \"mitmdump = mitmproxy.tools.main:mitmdump\",\n \"mitmweb = mitmproxy.tools.main:mitmweb\",\n \"pathod = pathod.pathod_cmdline:go_pathod\",\n \"pathoc = pathod.pathoc_cmdline:go_pathoc\"\n ]\n },\n # https://packaging.python.org/en/latest/requirements/#install-requires\n # It is not considered best practice to use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"blinker>=1.4, <1.5\",\n \"brotlipy>=0.5.1, <0.8\",\n \"certifi>=2015.11.20.1\", # no semver here - this should always be on the last release!\n \"click>=6.2, <7\",\n \"cryptography>=2.0,<2.2\",\n \"h2>=3.0, <4\",\n \"hyperframe>=5.0, <6\",\n \"kaitaistruct>=0.7, <0.8\",\n \"ldap3>=2.2.0, <2.4\",\n \"passlib>=1.6.5, <1.8\",\n \"pyasn1>=0.3.1, <0.4\",\n \"pyOpenSSL>=17.2,<17.4\",\n \"pyparsing>=2.1.3, <2.3\",\n \"pyperclip>=1.5.22, <1.6\",\n \"requests>=2.9.1, <3\",\n \"ruamel.yaml>=0.13.2, <0.16\",\n \"sortedcontainers>=1.5.4, <1.6\",\n \"tornado>=4.3, <4.6\",\n \"urwid>=1.3.1, <1.4\",\n ],\n extras_require={\n ':sys_platform == \"win32\"': [\n \"pydivert>=2.0.3,<2.2\",\n ],\n 'dev': [\n \"flake8>=3.2.1, <3.5\",\n \"Flask>=0.10.1, <0.13\",\n \"mypy>=0.530,<0.541\",\n \"pytest-cov>=2.2.1, <3\",\n \"pytest-faulthandler>=1.3.0, <2\",\n \"pytest-timeout>=1.0.0, <2\",\n \"pytest-xdist>=1.14, <2\",\n \"pytest>=3.1, <4\",\n \"rstcheck>=2.2, <4.0\",\n 
\"sphinx_rtd_theme>=0.1.9, <0.3\",\n \"sphinx-autobuild>=0.5.2, <0.8\",\n \"sphinx>=1.3.5, <1.7\",\n \"sphinxcontrib-documentedlist>=0.5.0, <0.7\",\n \"tox>=2.3, <3\",\n ],\n 'examples': [\n \"beautifulsoup4>=4.4.1, <4.7\",\n \"Pillow>=4.3,<4.4\",\n ]\n }\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nimport runpy\nfrom codecs import open\n\nfrom setuptools import setup, find_packages\n\n# Based on https://github.com/pypa/sampleproject/blob/master/setup.py\n# and https://python-packaging-user-guide.readthedocs.org/\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nVERSION = runpy.run_path(os.path.join(here, \"mitmproxy\", \"version.py\"))[\"VERSION\"]\n\nsetup(\n name=\"mitmproxy\",\n version=VERSION,\n description=\"An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.\",\n long_description=long_description,\n url=\"http://mitmproxy.org\",\n author=\"Aldo Cortesi\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Environment :: Console :: Curses\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Security\",\n \"Topic :: Internet\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: Proxy Servers\",\n \"Topic :: Software Development :: Testing\"\n ],\n packages=find_packages(include=[\n \"mitmproxy\", \"mitmproxy.*\",\n \"pathod\", \"pathod.*\",\n ]),\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n \"mitmproxy = mitmproxy.tools.main:mitmproxy\",\n \"mitmdump = mitmproxy.tools.main:mitmdump\",\n \"mitmweb = mitmproxy.tools.main:mitmweb\",\n \"pathod = pathod.pathod_cmdline:go_pathod\",\n \"pathoc = pathod.pathoc_cmdline:go_pathoc\"\n ]\n },\n # https://packaging.python.org/en/latest/requirements/#install-requires\n # It is not considered best practice to use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"blinker>=1.4, <1.5\",\n \"brotlipy>=0.5.1, <0.8\",\n \"certifi>=2015.11.20.1\", # no semver here - this should always be on the last release!\n \"click>=6.2, <7\",\n \"cryptography>=2.0,<2.2\",\n \"h2>=3.0, <4\",\n \"hyperframe>=5.0, <6\",\n \"kaitaistruct>=0.7, <0.8\",\n \"ldap3>=2.2.0, <2.4\",\n \"passlib>=1.6.5, <1.8\",\n \"pyasn1>=0.3.1, <0.4\",\n \"pyOpenSSL>=17.2,<17.4\",\n \"pyparsing>=2.1.3, <2.3\",\n \"pyperclip>=1.5.22, <1.6\",\n \"requests>=2.9.1, <3\",\n \"ruamel.yaml>=0.13.2, <0.16\",\n \"sortedcontainers>=1.5.4, <1.6\",\n \"tornado>=4.3, <4.6\",\n \"urwid>=1.3.1, <1.4\",\n ],\n extras_require={\n ':sys_platform == \"win32\"': [\n \"pydivert>=2.0.3,<2.2\",\n ],\n 'dev': [\n \"flake8>=3.5, <3.6\",\n \"Flask>=0.10.1, <0.13\",\n \"mypy>=0.530,<0.541\",\n \"pytest-cov>=2.2.1, <3\",\n \"pytest-faulthandler>=1.3.0, <2\",\n \"pytest-timeout>=1.0.0, <2\",\n \"pytest-xdist>=1.14, <2\",\n \"pytest>=3.1, <4\",\n \"rstcheck>=2.2, <4.0\",\n \"sphinx_rtd_theme>=0.1.9, <0.3\",\n \"sphinx-autobuild>=0.5.2, <0.8\",\n 
\"sphinx>=1.3.5, <1.7\",\n \"sphinxcontrib-documentedlist>=0.5.0, <0.7\",\n \"tox>=2.3, <3\",\n ],\n 'examples': [\n \"beautifulsoup4>=4.4.1, <4.7\",\n \"Pillow>=4.3,<4.4\",\n ]\n }\n)\n", "path": "setup.py"}]}
| 1,641 | 137 |
gh_patches_debug_28334
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-3104
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
only capturing the first line https://github.com/bridgecrewio/checkov/blob/2.0.1131/checkov/dockerfile/checks/WorkdirIsAbsolute.py
def scan_entity_conf(self, conf):
for mydir in conf:
mypath = mydir["value"]
if re.match(PATH, mypath):
return CheckResult.FAILED, mydir
return CheckResult.PASSED, None
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/dockerfile/checks/WorkdirIsAbsolute.py`
Content:
```
1 import re
2
3 from checkov.common.models.enums import CheckCategories, CheckResult
4 from checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck
5
6 ISABSOLUTE = re.compile("(^/[A-z0-9-_+]*)|(^[A-z0-9-_+]:\\\\.*)|(^\\$[{}A-z0-9-_+].*)")
7
8
9 class WorkdirIsAbsolute(BaseDockerfileCheck):
10 def __init__(self):
11 """
12 For clarity and reliability, you should always use absolute paths for your WORKDIR.
13 """
14 name = "Ensure that WORKDIR values are absolute paths"
15 id = "CKV_DOCKER_10"
16 supported_instructions = ["WORKDIR"]
17 categories = [CheckCategories.CONVENTION]
18 super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)
19
20 def scan_entity_conf(self, conf):
21 for mydir in conf:
22 mypath = mydir["value"]
23 if not re.match(ISABSOLUTE, mypath):
24 return CheckResult.FAILED, mydir
25 return CheckResult.PASSED, None
26
27
28 check = WorkdirIsAbsolute()
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/dockerfile/checks/WorkdirIsAbsolute.py b/checkov/dockerfile/checks/WorkdirIsAbsolute.py
--- a/checkov/dockerfile/checks/WorkdirIsAbsolute.py
+++ b/checkov/dockerfile/checks/WorkdirIsAbsolute.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import re
from checkov.common.models.enums import CheckCategories, CheckResult
@@ -7,21 +9,26 @@
class WorkdirIsAbsolute(BaseDockerfileCheck):
- def __init__(self):
+ def __init__(self) -> None:
"""
For clarity and reliability, you should always use absolute paths for your WORKDIR.
"""
name = "Ensure that WORKDIR values are absolute paths"
id = "CKV_DOCKER_10"
- supported_instructions = ["WORKDIR"]
- categories = [CheckCategories.CONVENTION]
+ supported_instructions = ("WORKDIR",)
+ categories = (CheckCategories.CONVENTION,)
super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)
- def scan_entity_conf(self, conf):
- for mydir in conf:
- mypath = mydir["value"]
- if not re.match(ISABSOLUTE, mypath):
- return CheckResult.FAILED, mydir
+ def scan_entity_conf(self, conf: list[dict[str, int | str]]) -> tuple[CheckResult, list[dict[str, int | str]] | None]:
+ workdirs = []
+ for workdir in conf:
+ path = workdir["value"]
+ if not re.match(ISABSOLUTE, path):
+ workdirs.append(workdir)
+
+ if workdirs:
+ return CheckResult.FAILED, workdirs
+
return CheckResult.PASSED, None
|
{"golden_diff": "diff --git a/checkov/dockerfile/checks/WorkdirIsAbsolute.py b/checkov/dockerfile/checks/WorkdirIsAbsolute.py\n--- a/checkov/dockerfile/checks/WorkdirIsAbsolute.py\n+++ b/checkov/dockerfile/checks/WorkdirIsAbsolute.py\n@@ -1,3 +1,5 @@\n+from __future__ import annotations\n+\n import re\n \n from checkov.common.models.enums import CheckCategories, CheckResult\n@@ -7,21 +9,26 @@\n \n \n class WorkdirIsAbsolute(BaseDockerfileCheck):\n- def __init__(self):\n+ def __init__(self) -> None:\n \"\"\"\n For clarity and reliability, you should always use absolute paths for your WORKDIR.\n \"\"\"\n name = \"Ensure that WORKDIR values are absolute paths\"\n id = \"CKV_DOCKER_10\"\n- supported_instructions = [\"WORKDIR\"]\n- categories = [CheckCategories.CONVENTION]\n+ supported_instructions = (\"WORKDIR\",)\n+ categories = (CheckCategories.CONVENTION,)\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n \n- def scan_entity_conf(self, conf):\n- for mydir in conf:\n- mypath = mydir[\"value\"]\n- if not re.match(ISABSOLUTE, mypath):\n- return CheckResult.FAILED, mydir\n+ def scan_entity_conf(self, conf: list[dict[str, int | str]]) -> tuple[CheckResult, list[dict[str, int | str]] | None]:\n+ workdirs = []\n+ for workdir in conf:\n+ path = workdir[\"value\"]\n+ if not re.match(ISABSOLUTE, path):\n+ workdirs.append(workdir)\n+\n+ if workdirs:\n+ return CheckResult.FAILED, workdirs\n+\n return CheckResult.PASSED, None\n", "issue": "only caputring the first line https://github.com/bridgecrewio/checkov/blob/2.0.1131/checkov/dockerfile/checks/WorkdirIsAbsolute.py\ndef scan_entity_conf(self, conf):\r\n for mydir in conf:\r\n mypath = mydir[\"value\"]\r\n if re.match(PATH, mypath):\r\n return CheckResult.FAILED, mydir\r\n return CheckResult.PASSED, None\n", "before_files": [{"content": "import re\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\nISABSOLUTE = re.compile(\"(^/[A-z0-9-_+]*)|(^[A-z0-9-_+]:\\\\\\\\.*)|(^\\\\$[{}A-z0-9-_+].*)\")\n\n\nclass WorkdirIsAbsolute(BaseDockerfileCheck):\n def __init__(self):\n \"\"\"\n For clarity and reliability, you should always use absolute paths for your WORKDIR.\n \"\"\"\n name = \"Ensure that WORKDIR values are absolute paths\"\n id = \"CKV_DOCKER_10\"\n supported_instructions = [\"WORKDIR\"]\n categories = [CheckCategories.CONVENTION]\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_entity_conf(self, conf):\n for mydir in conf:\n mypath = mydir[\"value\"]\n if not re.match(ISABSOLUTE, mypath):\n return CheckResult.FAILED, mydir\n return CheckResult.PASSED, None\n\n\ncheck = WorkdirIsAbsolute()\n", "path": "checkov/dockerfile/checks/WorkdirIsAbsolute.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport re\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\nISABSOLUTE = re.compile(\"(^/[A-z0-9-_+]*)|(^[A-z0-9-_+]:\\\\\\\\.*)|(^\\\\$[{}A-z0-9-_+].*)\")\n\n\nclass WorkdirIsAbsolute(BaseDockerfileCheck):\n def __init__(self) -> None:\n \"\"\"\n For clarity and reliability, you should always use absolute paths for your WORKDIR.\n \"\"\"\n name = \"Ensure that WORKDIR values are absolute paths\"\n id = \"CKV_DOCKER_10\"\n supported_instructions = (\"WORKDIR\",)\n categories = (CheckCategories.CONVENTION,)\n 
super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_entity_conf(self, conf: list[dict[str, int | str]]) -> tuple[CheckResult, list[dict[str, int | str]] | None]:\n workdirs = []\n for workdir in conf:\n path = workdir[\"value\"]\n if not re.match(ISABSOLUTE, path):\n workdirs.append(workdir)\n\n if workdirs:\n return CheckResult.FAILED, workdirs\n\n return CheckResult.PASSED, None\n\n\ncheck = WorkdirIsAbsolute()\n", "path": "checkov/dockerfile/checks/WorkdirIsAbsolute.py"}]}
| 663 | 411 |
gh_patches_debug_29107
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-410
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Nested stack reference to InstanceProfile triggers E2502 Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile
*cfn-lint version: `0.8.1`*
# Description of issue
When using nested stacks and passing IamInstanceProfile ARNs between stacks, E2502 is triggered though it shouldn't be.
# Steps to reproduce
Create a parent template like this
```yaml
AWSTemplateFormatVersion: 2010-09-09
Resources:
IAMInstanceProfile:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: https://s3-us-west-2.amazonaws.com/example-bucket/example-instance-profile.yml
Instance:
Type: AWS::CloudFormation::Stack
Properties:
Parameters:
IamInstanceProfile: !GetAtt IAMInstanceProfile.Outputs.InstanceProfileArn
TemplateURL: https://s3-us-west-2.amazonaws.com/example-bucket/example-instance.yml
```
and a child template like this
```yaml
AWSTemplateFormatVersion: 2010-09-09
Resources:
InstanceProfile:
Type: AWS::IAM::InstanceProfile
Properties:
Roles:
- ExampleRole
Outputs:
InstanceProfileArn:
Value: !GetAtt InstanceProfile.Arn
```
# Expected results
The `IamInstanceProfile` parameter in the parent template's `Instance` sub-stack resource definition does indeed contain a valid IAM Instance Profile ARN (passed in from the `IAMInstanceProfile` sub-stack resource and as a result, there should be no error.
Ideally cfn-lint would recognize that `GetAtt` is referencing an output from another stack which very well could be an InstanceProfile ARN and as a result, optimistically not report this error.
Alternatively, if cfn-lint could introspect the sub-stack and determine the object type of the output, it would know whether or not it was the correct object type.
# Actual results
cfn-lint reports the error
> E2502 Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for Resources/Instance/Properties/Parameters/IamInstanceProfile/Fn::GetAtt
> example-parent.yml:11:9
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/iam/InstanceProfile.py`
Content:
```
1 """
2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 from cfnlint import CloudFormationLintRule
18 from cfnlint import RuleMatch
19
20
21 class InstanceProfile(CloudFormationLintRule):
22 """Check if IamInstanceProfile are used"""
23 id = 'E2502'
24 shortdesc = 'Check if IamInstanceProfile are using the name and not ARN'
25 description = 'See if there are any properties IamInstanceProfile' + \
26 'are using name and not ARN'
27 source_url = 'https://github.com/awslabs/cfn-python-lint'
28 tags = ['properties']
29
30 def match(self, cfn):
31 """Check CloudFormation IamInstanceProfile Parameters"""
32
33 matches = []
34
35 # Build the list of keys
36 trees = cfn.search_deep_keys('Fn::GetAtt')
37 # Filter only resources
38 # Disable pylint for Pylint 2
39 # pylint: disable=W0110
40 trees = filter(lambda x: x[0] == 'Resources', trees)
41 for tree in trees:
42 if any(e == 'IamInstanceProfile' for e in tree):
43 obj = tree[-1]
44 objtype = cfn.template.get('Resources', {}).get(obj[0], {}).get('Type')
45 if objtype:
46 if objtype != 'AWS::IAM::InstanceProfile':
47 message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (
48 '/'.join(map(str, tree[:-1])))
49 matches.append(RuleMatch(tree[:-1], message))
50 else:
51 if cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:
52 if obj[1] != 'Arn':
53 message = 'Property IamInstanceProfile should be an ARN for %s' % (
54 '/'.join(map(str, tree[:-1])))
55 matches.append(RuleMatch(tree[:-1], message))
56 else:
57 if obj[1] == 'Arn':
58 message = 'Property IamInstanceProfile shouldn\'t be an ARN for %s' % (
59 '/'.join(map(str, tree[:-1])))
60 matches.append(RuleMatch(tree[:-1], message))
61
62 # Search Refs
63 trees = cfn.search_deep_keys('Ref')
64 # Filter only resoureces
65 trees = filter(lambda x: x[0] == 'Resources', trees)
66 for tree in trees:
67 if any(e == 'IamInstanceProfile' for e in tree):
68 obj = tree[-1]
69 objtype = cfn.template.get('Resources', {}).get(obj, {}).get('Type')
70 if objtype:
71 if objtype != 'AWS::IAM::InstanceProfile':
72 message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (
73 '/'.join(map(str, tree[:-1])))
74 matches.append(RuleMatch(tree[:-1], message))
75
76 return matches
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cfnlint/rules/resources/iam/InstanceProfile.py b/src/cfnlint/rules/resources/iam/InstanceProfile.py
--- a/src/cfnlint/rules/resources/iam/InstanceProfile.py
+++ b/src/cfnlint/rules/resources/iam/InstanceProfile.py
@@ -43,12 +43,17 @@
obj = tree[-1]
objtype = cfn.template.get('Resources', {}).get(obj[0], {}).get('Type')
if objtype:
- if objtype != 'AWS::IAM::InstanceProfile':
+ if objtype not in ['AWS::IAM::InstanceProfile', 'AWS::CloudFormation::Stack', 'AWS::CloudFormation::CustomResource']:
message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (
'/'.join(map(str, tree[:-1])))
matches.append(RuleMatch(tree[:-1], message))
else:
- if cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:
+ if objtype in ['AWS::CloudFormation::Stack']:
+ if obj[1] != 'Outputs':
+ message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (
+ '/'.join(map(str, tree[:-1])))
+ matches.append(RuleMatch(tree[:-1], message))
+ elif cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:
if obj[1] != 'Arn':
message = 'Property IamInstanceProfile should be an ARN for %s' % (
'/'.join(map(str, tree[:-1])))
|
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/iam/InstanceProfile.py b/src/cfnlint/rules/resources/iam/InstanceProfile.py\n--- a/src/cfnlint/rules/resources/iam/InstanceProfile.py\n+++ b/src/cfnlint/rules/resources/iam/InstanceProfile.py\n@@ -43,12 +43,17 @@\n obj = tree[-1]\n objtype = cfn.template.get('Resources', {}).get(obj[0], {}).get('Type')\n if objtype:\n- if objtype != 'AWS::IAM::InstanceProfile':\n+ if objtype not in ['AWS::IAM::InstanceProfile', 'AWS::CloudFormation::Stack', 'AWS::CloudFormation::CustomResource']:\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n- if cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:\n+ if objtype in ['AWS::CloudFormation::Stack']:\n+ if obj[1] != 'Outputs':\n+ message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n+ '/'.join(map(str, tree[:-1])))\n+ matches.append(RuleMatch(tree[:-1], message))\n+ elif cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:\n if obj[1] != 'Arn':\n message = 'Property IamInstanceProfile should be an ARN for %s' % (\n '/'.join(map(str, tree[:-1])))\n", "issue": "Nested stack reference to InstanceProfile triggers E2502 Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile\n*cfn-lint version: `0.8.1`*\r\n\r\n# Description of issue\r\n\r\nWhen using nested stacks and passing IamInstanceProfile ARNs between stacks, E2502 is triggered though it shouldn't be.\r\n\r\n# Steps to reproduce\r\n\r\nCreate a parent template like this\r\n```yaml\r\nAWSTemplateFormatVersion: 2010-09-09\r\nResources:\r\n IAMInstanceProfile:\r\n Type: AWS::CloudFormation::Stack\r\n Properties:\r\n TemplateURL: https://s3-us-west-2.amazonaws.com/example-bucket/example-instance-profile.yml\r\n Instance:\r\n Type: AWS::CloudFormation::Stack\r\n Properties:\r\n Parameters:\r\n IamInstanceProfile: !GetAtt IAMInstanceProfile.Outputs.InstanceProfileArn\r\n TemplateURL: https://s3-us-west-2.amazonaws.com/example-bucket/example-instance.yml\r\n```\r\nand a child template like this\r\n\r\n```yaml\r\nAWSTemplateFormatVersion: 2010-09-09\r\nResources:\r\n InstanceProfile:\r\n Type: AWS::IAM::InstanceProfile\r\n Properties:\r\n Roles:\r\n - ExampleRole\r\nOutputs:\r\n InstanceProfileArn:\r\n Value: !GetAtt InstanceProfile.Arn\r\n```\r\n\r\n# Expected results\r\n\r\nThe `IamInstanceProfile` parameter in the parent template's `Instance` sub-stack resource definition does indeed contain a valid IAM Instance Profile ARN (passed in from the `IAMInstanceProfile` sub-stack resource and as a result, there should be no error.\r\n\r\nIdeally cfn-lint would recognize that `GetAtt` is referencing an output from another stack which very well could be an InstanceProfile ARN and as a result, optimistically not report this error.\r\n\r\nAlternatively, if cfn-lint could introspect the sub-stack and determine the object type of the output, it would know whether or not it was the correct object type.\r\n\r\n# Actual results\r\n\r\ncfn-lint reports the error\r\n\r\n> E2502 Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for Resources/Instance/Properties/Parameters/IamInstanceProfile/Fn::GetAtt\r\n> example-parent.yml:11:9\n", "before_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass InstanceProfile(CloudFormationLintRule):\n \"\"\"Check if IamInstanceProfile are used\"\"\"\n id = 'E2502'\n shortdesc = 'Check if IamInstanceProfile are using the name and not ARN'\n description = 'See if there are any properties IamInstanceProfile' + \\\n 'are using name and not ARN'\n source_url = 'https://github.com/awslabs/cfn-python-lint'\n tags = ['properties']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation IamInstanceProfile Parameters\"\"\"\n\n matches = []\n\n # Build the list of keys\n trees = cfn.search_deep_keys('Fn::GetAtt')\n # Filter only resources\n # Disable pylint for Pylint 2\n # pylint: disable=W0110\n trees = filter(lambda x: x[0] == 'Resources', trees)\n for tree in trees:\n if any(e == 'IamInstanceProfile' for e in tree):\n obj = tree[-1]\n objtype = cfn.template.get('Resources', {}).get(obj[0], {}).get('Type')\n if objtype:\n if objtype != 'AWS::IAM::InstanceProfile':\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n if cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:\n if obj[1] != 'Arn':\n message = 'Property IamInstanceProfile should be an ARN for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n if obj[1] == 'Arn':\n message = 'Property IamInstanceProfile shouldn\\'t be an ARN for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n\n # Search Refs\n trees = cfn.search_deep_keys('Ref')\n # Filter only resoureces\n trees = filter(lambda x: x[0] == 'Resources', trees)\n for tree in trees:\n if any(e == 'IamInstanceProfile' for e in tree):\n obj = tree[-1]\n objtype = cfn.template.get('Resources', {}).get(obj, {}).get('Type')\n if objtype:\n if objtype != 'AWS::IAM::InstanceProfile':\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n\n return matches\n", "path": "src/cfnlint/rules/resources/iam/InstanceProfile.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass InstanceProfile(CloudFormationLintRule):\n \"\"\"Check if IamInstanceProfile are used\"\"\"\n id = 'E2502'\n shortdesc = 'Check if IamInstanceProfile are using the name and not ARN'\n description = 'See if there are any properties IamInstanceProfile' + \\\n 'are using name and not ARN'\n source_url = 'https://github.com/awslabs/cfn-python-lint'\n tags = ['properties']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation IamInstanceProfile Parameters\"\"\"\n\n matches = []\n\n # Build the list of keys\n trees = cfn.search_deep_keys('Fn::GetAtt')\n # Filter only resources\n # Disable pylint for Pylint 2\n # pylint: disable=W0110\n trees = filter(lambda x: x[0] == 'Resources', trees)\n for tree in trees:\n if any(e == 'IamInstanceProfile' for e in tree):\n obj = tree[-1]\n objtype = cfn.template.get('Resources', {}).get(obj[0], {}).get('Type')\n if objtype:\n if objtype not in ['AWS::IAM::InstanceProfile', 'AWS::CloudFormation::Stack', 'AWS::CloudFormation::CustomResource']:\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n if objtype in ['AWS::CloudFormation::Stack']:\n if obj[1] != 'Outputs':\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n elif cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:\n if obj[1] != 'Arn':\n message = 'Property IamInstanceProfile should be an ARN for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n if obj[1] == 'Arn':\n message = 'Property IamInstanceProfile shouldn\\'t be an ARN for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n\n # Search Refs\n trees = cfn.search_deep_keys('Ref')\n # Filter only resoureces\n trees = filter(lambda x: x[0] == 'Resources', trees)\n for tree in trees:\n if any(e == 'IamInstanceProfile' for e in tree):\n obj = tree[-1]\n objtype = cfn.template.get('Resources', {}).get(obj, {}).get('Type')\n if objtype:\n if objtype != 'AWS::IAM::InstanceProfile':\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n\n return matches\n", "path": "src/cfnlint/rules/resources/iam/InstanceProfile.py"}]}
| 1,736 | 384 |
gh_patches_debug_34951
|
rasdani/github-patches
|
git_diff
|
biopython__biopython-3706
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
lcc.lcc_mult returns extra zero value
### Setup
I am reporting a problem with Biopython version, Python version, and operating
system as follows:
```python
import sys; print(sys.version)
import platform; print(platform.python_implementation()); print(platform.platform())
import Bio; print(Bio.__version__)
```
[GCC 4.8.5 20150623 (Red Hat 4.8.5-39)]
CPython
Linux-3.10.0-957.1.3.el7.x86_64-x86_64-with-centos-7.6.1810-Core
1.78
### Expected behaviour
[1.9056390622295662]
### Actual behaviour
[0, 1.9056390622295662]
### Steps to reproduce
lcc.lcc_mult('ACGATAGC', 8)
In addition according the [article](https://www.researchgate.net/publication/229964618_Sequence_Complexity_and_Composition), the calculation of LCC uses log base 4 but the implementation uses log base 2. That is why for the example sequence 5 in Table 1 the value shown is half the function value.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Bio/SeqUtils/lcc.py`
Content:
```
1 # Copyright 2003, 2007 by Sebastian Bassi. [email protected]
2 # All rights reserved.
3 #
4 # This file is part of the Biopython distribution and governed by your
5 # choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
6 # Please see the LICENSE file that should have been included as part of this
7 # package.
8 """Local Composition Complexity."""
9
10 import math
11
12
13 def lcc_mult(seq, wsize):
14 """Calculate Local Composition Complexity (LCC) values over sliding window.
15
16 Returns a list of floats, the LCC values for a sliding window over
17 the sequence.
18
19 seq - an unambiguous DNA sequence (a string or Seq object)
20 wsize - window size, integer
21
22 The result is the same as applying lcc_simp multiple times, but this
23 version is optimized for speed. The optimization works by using the
24 value of previous window as a base to compute the next one.
25 """
26 l2 = math.log(2)
27 tamseq = len(seq)
28 upper = str(seq).upper()
29 compone = [0]
30 lccsal = [0]
31 for i in range(wsize):
32 compone.append(
33 ((i + 1) / float(wsize)) * ((math.log((i + 1) / float(wsize))) / l2)
34 )
35 window = seq[0:wsize]
36 cant_a = window.count("A")
37 cant_c = window.count("C")
38 cant_t = window.count("T")
39 cant_g = window.count("G")
40 term_a = compone[cant_a]
41 term_c = compone[cant_c]
42 term_t = compone[cant_t]
43 term_g = compone[cant_g]
44 lccsal.append(-(term_a + term_c + term_t + term_g))
45 tail = seq[0]
46 for x in range(tamseq - wsize):
47 window = upper[x + 1 : wsize + x + 1]
48 if tail == window[-1]:
49 lccsal.append(lccsal[-1])
50 elif tail == "A":
51 cant_a -= 1
52 if window.endswith("C"):
53 cant_c += 1
54 term_a = compone[cant_a]
55 term_c = compone[cant_c]
56 lccsal.append(-(term_a + term_c + term_t + term_g))
57 elif window.endswith("T"):
58 cant_t += 1
59 term_a = compone[cant_a]
60 term_t = compone[cant_t]
61 lccsal.append(-(term_a + term_c + term_t + term_g))
62 elif window.endswith("G"):
63 cant_g += 1
64 term_a = compone[cant_a]
65 term_g = compone[cant_g]
66 lccsal.append(-(term_a + term_c + term_t + term_g))
67 elif tail == "C":
68 cant_c -= 1
69 if window.endswith("A"):
70 cant_a += 1
71 term_a = compone[cant_a]
72 term_c = compone[cant_c]
73 lccsal.append(-(term_a + term_c + term_t + term_g))
74 elif window.endswith("T"):
75 cant_t += 1
76 term_c = compone[cant_c]
77 term_t = compone[cant_t]
78 lccsal.append(-(term_a + term_c + term_t + term_g))
79 elif window.endswith("G"):
80 cant_g += 1
81 term_c = compone[cant_c]
82 term_g = compone[cant_g]
83 lccsal.append(-(term_a + term_c + term_t + term_g))
84 elif tail == "T":
85 cant_t -= 1
86 if window.endswith("A"):
87 cant_a += 1
88 term_a = compone[cant_a]
89 term_t = compone[cant_t]
90 lccsal.append(-(term_a + term_c + term_t + term_g))
91 elif window.endswith("C"):
92 cant_c += 1
93 term_c = compone[cant_c]
94 term_t = compone[cant_t]
95 lccsal.append(-(term_a + term_c + term_t + term_g))
96 elif window.endswith("G"):
97 cant_g += 1
98 term_t = compone[cant_t]
99 term_g = compone[cant_g]
100 lccsal.append(-(term_a + term_c + term_t + term_g))
101 elif tail == "G":
102 cant_g -= 1
103 if window.endswith("A"):
104 cant_a += 1
105 term_a = compone[cant_a]
106 term_g = compone[cant_g]
107 lccsal.append(-(term_a + term_c + term_t + term_g))
108 elif window.endswith("C"):
109 cant_c += 1
110 term_c = compone[cant_c]
111 term_g = compone[cant_g]
112 lccsal.append(-(term_a + term_c + term_t + term_g))
113 elif window.endswith("T"):
114 cant_t += 1
115 term_t = compone[cant_t]
116 term_g = compone[cant_g]
117 lccsal.append(-(term_a + term_c + term_t + term_g))
118 tail = window[0]
119 return lccsal
120
121
122 def lcc_simp(seq):
123 """Calculate Local Composition Complexity (LCC) for a sequence.
124
125 seq - an unambiguous DNA sequence (a string or Seq object)
126
127 Returns the Local Composition Complexity (LCC) value for the entire
128 sequence (as a float).
129
130 Reference:
131 Andrzej K Konopka (2005) Sequence Complexity and Composition
132 https://doi.org/10.1038/npg.els.0005260
133 """
134 wsize = len(seq)
135 upper = str(seq).upper()
136 l2 = math.log(2)
137 if "A" not in seq:
138 term_a = 0
139 # Check to avoid calculating the log of 0.
140 else:
141 term_a = ((upper.count("A")) / float(wsize)) * (
142 (math.log((upper.count("A")) / float(wsize))) / l2
143 )
144 if "C" not in seq:
145 term_c = 0
146 else:
147 term_c = ((upper.count("C")) / float(wsize)) * (
148 (math.log((upper.count("C")) / float(wsize))) / l2
149 )
150 if "T" not in seq:
151 term_t = 0
152 else:
153 term_t = ((upper.count("T")) / float(wsize)) * (
154 (math.log((upper.count("T")) / float(wsize))) / l2
155 )
156 if "G" not in seq:
157 term_g = 0
158 else:
159 term_g = ((upper.count("G")) / float(wsize)) * (
160 (math.log((upper.count("G")) / float(wsize))) / l2
161 )
162 return -(term_a + term_c + term_t + term_g)
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/Bio/SeqUtils/lcc.py b/Bio/SeqUtils/lcc.py
--- a/Bio/SeqUtils/lcc.py
+++ b/Bio/SeqUtils/lcc.py
@@ -23,14 +23,14 @@
version is optimized for speed. The optimization works by using the
value of previous window as a base to compute the next one.
"""
- l2 = math.log(2)
+ l4 = math.log(4)
tamseq = len(seq)
upper = str(seq).upper()
compone = [0]
- lccsal = [0]
+ lccsal = []
for i in range(wsize):
compone.append(
- ((i + 1) / float(wsize)) * ((math.log((i + 1) / float(wsize))) / l2)
+ ((i + 1) / float(wsize)) * ((math.log((i + 1) / float(wsize))) / l4)
)
window = seq[0:wsize]
cant_a = window.count("A")
@@ -133,30 +133,30 @@
"""
wsize = len(seq)
upper = str(seq).upper()
- l2 = math.log(2)
+ l4 = math.log(4)
+ # Check to avoid calculating the log of 0.
if "A" not in seq:
term_a = 0
- # Check to avoid calculating the log of 0.
else:
term_a = ((upper.count("A")) / float(wsize)) * (
- (math.log((upper.count("A")) / float(wsize))) / l2
+ (math.log((upper.count("A")) / float(wsize))) / l4
)
if "C" not in seq:
term_c = 0
else:
term_c = ((upper.count("C")) / float(wsize)) * (
- (math.log((upper.count("C")) / float(wsize))) / l2
+ (math.log((upper.count("C")) / float(wsize))) / l4
)
if "T" not in seq:
term_t = 0
else:
term_t = ((upper.count("T")) / float(wsize)) * (
- (math.log((upper.count("T")) / float(wsize))) / l2
+ (math.log((upper.count("T")) / float(wsize))) / l4
)
if "G" not in seq:
term_g = 0
else:
term_g = ((upper.count("G")) / float(wsize)) * (
- (math.log((upper.count("G")) / float(wsize))) / l2
+ (math.log((upper.count("G")) / float(wsize))) / l4
)
return -(term_a + term_c + term_t + term_g)
|
{"golden_diff": "diff --git a/Bio/SeqUtils/lcc.py b/Bio/SeqUtils/lcc.py\n--- a/Bio/SeqUtils/lcc.py\n+++ b/Bio/SeqUtils/lcc.py\n@@ -23,14 +23,14 @@\n version is optimized for speed. The optimization works by using the\n value of previous window as a base to compute the next one.\n \"\"\"\n- l2 = math.log(2)\n+ l4 = math.log(4)\n tamseq = len(seq)\n upper = str(seq).upper()\n compone = [0]\n- lccsal = [0]\n+ lccsal = []\n for i in range(wsize):\n compone.append(\n- ((i + 1) / float(wsize)) * ((math.log((i + 1) / float(wsize))) / l2)\n+ ((i + 1) / float(wsize)) * ((math.log((i + 1) / float(wsize))) / l4)\n )\n window = seq[0:wsize]\n cant_a = window.count(\"A\")\n@@ -133,30 +133,30 @@\n \"\"\"\n wsize = len(seq)\n upper = str(seq).upper()\n- l2 = math.log(2)\n+ l4 = math.log(4)\n+ # Check to avoid calculating the log of 0.\n if \"A\" not in seq:\n term_a = 0\n- # Check to avoid calculating the log of 0.\n else:\n term_a = ((upper.count(\"A\")) / float(wsize)) * (\n- (math.log((upper.count(\"A\")) / float(wsize))) / l2\n+ (math.log((upper.count(\"A\")) / float(wsize))) / l4\n )\n if \"C\" not in seq:\n term_c = 0\n else:\n term_c = ((upper.count(\"C\")) / float(wsize)) * (\n- (math.log((upper.count(\"C\")) / float(wsize))) / l2\n+ (math.log((upper.count(\"C\")) / float(wsize))) / l4\n )\n if \"T\" not in seq:\n term_t = 0\n else:\n term_t = ((upper.count(\"T\")) / float(wsize)) * (\n- (math.log((upper.count(\"T\")) / float(wsize))) / l2\n+ (math.log((upper.count(\"T\")) / float(wsize))) / l4\n )\n if \"G\" not in seq:\n term_g = 0\n else:\n term_g = ((upper.count(\"G\")) / float(wsize)) * (\n- (math.log((upper.count(\"G\")) / float(wsize))) / l2\n+ (math.log((upper.count(\"G\")) / float(wsize))) / l4\n )\n return -(term_a + term_c + term_t + term_g)\n", "issue": "lcc.lcc_mult returns extra zero value\n### Setup\r\n\r\nI am reporting a problem with Biopython version, Python version, and operating\r\nsystem as follows:\r\n\r\n```python\r\nimport sys; print(sys.version)\r\nimport platform; print(platform.python_implementation()); print(platform.platform())\r\nimport Bio; print(Bio.__version__)\r\n```\r\n\r\n[GCC 4.8.5 20150623 (Red Hat 4.8.5-39)]\r\nCPython\r\nLinux-3.10.0-957.1.3.el7.x86_64-x86_64-with-centos-7.6.1810-Core\r\n1.78\r\n\r\n### Expected behaviour\r\n\r\n[1.9056390622295662]\r\n\r\n### Actual behaviour\r\n\r\n[0, 1.9056390622295662]\r\n\r\n### Steps to reproduce\r\n\r\nlcc.lcc_mult('ACGATAGC', 8)\r\n\r\nIn addition according the [article](https://www.researchgate.net/publication/229964618_Sequence_Complexity_and_Composition), the calculation of LCC uses log base 4 but the implementation uses log base 2. That is why for the example sequence 5 in Table 1 the value shown is half the function value.\r\n\n", "before_files": [{"content": "# Copyright 2003, 2007 by Sebastian Bassi. 
[email protected]\n# All rights reserved.\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\"\"\"Local Composition Complexity.\"\"\"\n\nimport math\n\n\ndef lcc_mult(seq, wsize):\n \"\"\"Calculate Local Composition Complexity (LCC) values over sliding window.\n\n Returns a list of floats, the LCC values for a sliding window over\n the sequence.\n\n seq - an unambiguous DNA sequence (a string or Seq object)\n wsize - window size, integer\n\n The result is the same as applying lcc_simp multiple times, but this\n version is optimized for speed. The optimization works by using the\n value of previous window as a base to compute the next one.\n \"\"\"\n l2 = math.log(2)\n tamseq = len(seq)\n upper = str(seq).upper()\n compone = [0]\n lccsal = [0]\n for i in range(wsize):\n compone.append(\n ((i + 1) / float(wsize)) * ((math.log((i + 1) / float(wsize))) / l2)\n )\n window = seq[0:wsize]\n cant_a = window.count(\"A\")\n cant_c = window.count(\"C\")\n cant_t = window.count(\"T\")\n cant_g = window.count(\"G\")\n term_a = compone[cant_a]\n term_c = compone[cant_c]\n term_t = compone[cant_t]\n term_g = compone[cant_g]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n tail = seq[0]\n for x in range(tamseq - wsize):\n window = upper[x + 1 : wsize + x + 1]\n if tail == window[-1]:\n lccsal.append(lccsal[-1])\n elif tail == \"A\":\n cant_a -= 1\n if window.endswith(\"C\"):\n cant_c += 1\n term_a = compone[cant_a]\n term_c = compone[cant_c]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif window.endswith(\"T\"):\n cant_t += 1\n term_a = compone[cant_a]\n term_t = compone[cant_t]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif window.endswith(\"G\"):\n cant_g += 1\n term_a = compone[cant_a]\n term_g = compone[cant_g]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif tail == \"C\":\n cant_c -= 1\n if window.endswith(\"A\"):\n cant_a += 1\n term_a = compone[cant_a]\n term_c = compone[cant_c]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif window.endswith(\"T\"):\n cant_t += 1\n term_c = compone[cant_c]\n term_t = compone[cant_t]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif window.endswith(\"G\"):\n cant_g += 1\n term_c = compone[cant_c]\n term_g = compone[cant_g]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif tail == \"T\":\n cant_t -= 1\n if window.endswith(\"A\"):\n cant_a += 1\n term_a = compone[cant_a]\n term_t = compone[cant_t]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif window.endswith(\"C\"):\n cant_c += 1\n term_c = compone[cant_c]\n term_t = compone[cant_t]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif window.endswith(\"G\"):\n cant_g += 1\n term_t = compone[cant_t]\n term_g = compone[cant_g]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif tail == \"G\":\n cant_g -= 1\n if window.endswith(\"A\"):\n cant_a += 1\n term_a = compone[cant_a]\n term_g = compone[cant_g]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif window.endswith(\"C\"):\n cant_c += 1\n term_c = compone[cant_c]\n term_g = compone[cant_g]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif window.endswith(\"T\"):\n cant_t += 1\n term_t = compone[cant_t]\n term_g = compone[cant_g]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n tail = window[0]\n return lccsal\n\n\ndef lcc_simp(seq):\n 
\"\"\"Calculate Local Composition Complexity (LCC) for a sequence.\n\n seq - an unambiguous DNA sequence (a string or Seq object)\n\n Returns the Local Composition Complexity (LCC) value for the entire\n sequence (as a float).\n\n Reference:\n Andrzej K Konopka (2005) Sequence Complexity and Composition\n https://doi.org/10.1038/npg.els.0005260\n \"\"\"\n wsize = len(seq)\n upper = str(seq).upper()\n l2 = math.log(2)\n if \"A\" not in seq:\n term_a = 0\n # Check to avoid calculating the log of 0.\n else:\n term_a = ((upper.count(\"A\")) / float(wsize)) * (\n (math.log((upper.count(\"A\")) / float(wsize))) / l2\n )\n if \"C\" not in seq:\n term_c = 0\n else:\n term_c = ((upper.count(\"C\")) / float(wsize)) * (\n (math.log((upper.count(\"C\")) / float(wsize))) / l2\n )\n if \"T\" not in seq:\n term_t = 0\n else:\n term_t = ((upper.count(\"T\")) / float(wsize)) * (\n (math.log((upper.count(\"T\")) / float(wsize))) / l2\n )\n if \"G\" not in seq:\n term_g = 0\n else:\n term_g = ((upper.count(\"G\")) / float(wsize)) * (\n (math.log((upper.count(\"G\")) / float(wsize))) / l2\n )\n return -(term_a + term_c + term_t + term_g)\n", "path": "Bio/SeqUtils/lcc.py"}], "after_files": [{"content": "# Copyright 2003, 2007 by Sebastian Bassi. [email protected]\n# All rights reserved.\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\"\"\"Local Composition Complexity.\"\"\"\n\nimport math\n\n\ndef lcc_mult(seq, wsize):\n \"\"\"Calculate Local Composition Complexity (LCC) values over sliding window.\n\n Returns a list of floats, the LCC values for a sliding window over\n the sequence.\n\n seq - an unambiguous DNA sequence (a string or Seq object)\n wsize - window size, integer\n\n The result is the same as applying lcc_simp multiple times, but this\n version is optimized for speed. 
The optimization works by using the\n value of previous window as a base to compute the next one.\n \"\"\"\n l4 = math.log(4)\n tamseq = len(seq)\n upper = str(seq).upper()\n compone = [0]\n lccsal = []\n for i in range(wsize):\n compone.append(\n ((i + 1) / float(wsize)) * ((math.log((i + 1) / float(wsize))) / l4)\n )\n window = seq[0:wsize]\n cant_a = window.count(\"A\")\n cant_c = window.count(\"C\")\n cant_t = window.count(\"T\")\n cant_g = window.count(\"G\")\n term_a = compone[cant_a]\n term_c = compone[cant_c]\n term_t = compone[cant_t]\n term_g = compone[cant_g]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n tail = seq[0]\n for x in range(tamseq - wsize):\n window = upper[x + 1 : wsize + x + 1]\n if tail == window[-1]:\n lccsal.append(lccsal[-1])\n elif tail == \"A\":\n cant_a -= 1\n if window.endswith(\"C\"):\n cant_c += 1\n term_a = compone[cant_a]\n term_c = compone[cant_c]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif window.endswith(\"T\"):\n cant_t += 1\n term_a = compone[cant_a]\n term_t = compone[cant_t]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif window.endswith(\"G\"):\n cant_g += 1\n term_a = compone[cant_a]\n term_g = compone[cant_g]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif tail == \"C\":\n cant_c -= 1\n if window.endswith(\"A\"):\n cant_a += 1\n term_a = compone[cant_a]\n term_c = compone[cant_c]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif window.endswith(\"T\"):\n cant_t += 1\n term_c = compone[cant_c]\n term_t = compone[cant_t]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif window.endswith(\"G\"):\n cant_g += 1\n term_c = compone[cant_c]\n term_g = compone[cant_g]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif tail == \"T\":\n cant_t -= 1\n if window.endswith(\"A\"):\n cant_a += 1\n term_a = compone[cant_a]\n term_t = compone[cant_t]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif window.endswith(\"C\"):\n cant_c += 1\n term_c = compone[cant_c]\n term_t = compone[cant_t]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif window.endswith(\"G\"):\n cant_g += 1\n term_t = compone[cant_t]\n term_g = compone[cant_g]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif tail == \"G\":\n cant_g -= 1\n if window.endswith(\"A\"):\n cant_a += 1\n term_a = compone[cant_a]\n term_g = compone[cant_g]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif window.endswith(\"C\"):\n cant_c += 1\n term_c = compone[cant_c]\n term_g = compone[cant_g]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n elif window.endswith(\"T\"):\n cant_t += 1\n term_t = compone[cant_t]\n term_g = compone[cant_g]\n lccsal.append(-(term_a + term_c + term_t + term_g))\n tail = window[0]\n return lccsal\n\n\ndef lcc_simp(seq):\n \"\"\"Calculate Local Composition Complexity (LCC) for a sequence.\n\n seq - an unambiguous DNA sequence (a string or Seq object)\n\n Returns the Local Composition Complexity (LCC) value for the entire\n sequence (as a float).\n\n Reference:\n Andrzej K Konopka (2005) Sequence Complexity and Composition\n https://doi.org/10.1038/npg.els.0005260\n \"\"\"\n wsize = len(seq)\n upper = str(seq).upper()\n l4 = math.log(4)\n # Check to avoid calculating the log of 0.\n if \"A\" not in seq:\n term_a = 0\n else:\n term_a = ((upper.count(\"A\")) / float(wsize)) * (\n (math.log((upper.count(\"A\")) / float(wsize))) / l4\n )\n if \"C\" not in seq:\n term_c = 0\n else:\n term_c = ((upper.count(\"C\")) / float(wsize)) * (\n 
(math.log((upper.count(\"C\")) / float(wsize))) / l4\n )\n if \"T\" not in seq:\n term_t = 0\n else:\n term_t = ((upper.count(\"T\")) / float(wsize)) * (\n (math.log((upper.count(\"T\")) / float(wsize))) / l4\n )\n if \"G\" not in seq:\n term_g = 0\n else:\n term_g = ((upper.count(\"G\")) / float(wsize)) * (\n (math.log((upper.count(\"G\")) / float(wsize))) / l4\n )\n return -(term_a + term_c + term_t + term_g)\n", "path": "Bio/SeqUtils/lcc.py"}]}
| 2,488 | 649 |
gh_patches_debug_37593
|
rasdani/github-patches
|
git_diff
|
getmoto__moto-1565
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Authorize Security Group Ingress Only Works with Multi-Rule?
Near as I can tell, I should be able to use a simplified form of authorizing ingress when I only need a single rule; but this doesn't seem to work with Moto. The multi-rule syntax does work, on the other hand.
See these tests:
```
import boto3
import pytest
from moto import mock_ec2
@mock_ec2
def test_security_group_ingress_succeeds():
ec2 = boto3.resource('ec2', 'ca-central-1')
sg = ec2.create_security_group(Description='Test SG',GroupName='test-sg')
assert len(sg.ip_permissions) == 0
sg.authorize_ingress(IpPermissions=[
{
'FromPort': 22,
'ToPort': 22,
'IpProtocol': 'tcp',
'IpRanges': [
{
'CidrIp': '192.168.0.1/32'
}
]
}
])
assert len(sg.ip_permissions) == 1
sg_after = ec2.SecurityGroup(sg.id)
assert len(sg_after.ip_permissions) == 1
@mock_ec2
def test_security_group_ingress_fails_without_multirule():
ec2 = boto3.resource('ec2', 'ca-central-1')
sg = ec2.create_security_group(Description='Test SG', GroupName='test-sg')
assert len(sg.ip_permissions) == 0
sg.authorize_ingress(CidrIp='192.168.0.1/32', FromPort=22, ToPort=22, IpProtocol='tcp')
# Fails
assert len(sg.ip_permissions) == 1
@mock_ec2
def test_security_group_ingress_fails_without_multirule_after_reload():
ec2 = boto3.resource('ec2', 'ca-central-1')
sg = ec2.create_security_group(Description='Test SG', GroupName='test-sg')
assert len(sg.ip_permissions) == 0
sg.authorize_ingress(CidrIp='192.168.0.1/32', FromPort=22, ToPort=22, IpProtocol='tcp')
# Also Fails
sg_after = ec2.SecurityGroup(sg.id)
assert len(sg_after.ip_permissions) == 1
```
The first test, using the multi-rule syntax with the `IpPermission` array, works fine.
The second two tests fail. AFAIK, this syntax is valid, but doesn't work with moto.
I expected all three tests to pass, but they don't. Am I doing something wrong, or is this a Moto defect?
Using moto 1.2.0, installed with pipenv, using python mocks. Both version 1.6.6, installed the same way.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `moto/ec2/responses/security_groups.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from moto.core.responses import BaseResponse
4 from moto.ec2.utils import filters_from_querystring
5
6
7 def try_parse_int(value, default=None):
8 try:
9 return int(value)
10 except (TypeError, ValueError):
11 return default
12
13
14 class SecurityGroups(BaseResponse):
15
16 def _process_rules_from_querystring(self):
17 group_name_or_id = (self._get_param('GroupName') or
18 self._get_param('GroupId'))
19
20 querytree = {}
21 for key, value in self.querystring.items():
22 key_splitted = key.split('.')
23 key_splitted = [try_parse_int(e, e) for e in key_splitted]
24
25 d = querytree
26 for subkey in key_splitted[:-1]:
27 if subkey not in d:
28 d[subkey] = {}
29 d = d[subkey]
30 d[key_splitted[-1]] = value
31
32 ip_permissions = querytree.get('IpPermissions') or {}
33 for ip_permission_idx in sorted(ip_permissions.keys()):
34 ip_permission = ip_permissions[ip_permission_idx]
35
36 ip_protocol = ip_permission.get('IpProtocol', [None])[0]
37 from_port = ip_permission.get('FromPort', [None])[0]
38 to_port = ip_permission.get('ToPort', [None])[0]
39
40 ip_ranges = []
41 ip_ranges_tree = ip_permission.get('IpRanges') or {}
42 for ip_range_idx in sorted(ip_ranges_tree.keys()):
43 ip_ranges.append(ip_ranges_tree[ip_range_idx]['CidrIp'][0])
44
45 source_groups = []
46 source_group_ids = []
47 groups_tree = ip_permission.get('Groups') or {}
48 for group_idx in sorted(groups_tree.keys()):
49 group_dict = groups_tree[group_idx]
50 if 'GroupId' in group_dict:
51 source_group_ids.append(group_dict['GroupId'][0])
52 elif 'GroupName' in group_dict:
53 source_groups.append(group_dict['GroupName'][0])
54
55 yield (group_name_or_id, ip_protocol, from_port, to_port, ip_ranges,
56 source_groups, source_group_ids)
57
58 def authorize_security_group_egress(self):
59 if self.is_not_dryrun('GrantSecurityGroupEgress'):
60 for args in self._process_rules_from_querystring():
61 self.ec2_backend.authorize_security_group_egress(*args)
62 return AUTHORIZE_SECURITY_GROUP_EGRESS_RESPONSE
63
64 def authorize_security_group_ingress(self):
65 if self.is_not_dryrun('GrantSecurityGroupIngress'):
66 for args in self._process_rules_from_querystring():
67 self.ec2_backend.authorize_security_group_ingress(*args)
68 return AUTHORIZE_SECURITY_GROUP_INGRESS_REPONSE
69
70 def create_security_group(self):
71 name = self._get_param('GroupName')
72 description = self._get_param('GroupDescription')
73 vpc_id = self._get_param('VpcId')
74
75 if self.is_not_dryrun('CreateSecurityGroup'):
76 group = self.ec2_backend.create_security_group(
77 name, description, vpc_id=vpc_id)
78 template = self.response_template(CREATE_SECURITY_GROUP_RESPONSE)
79 return template.render(group=group)
80
81 def delete_security_group(self):
82 # TODO this should raise an error if there are instances in the group.
83 # See
84 # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteSecurityGroup.html
85
86 name = self._get_param('GroupName')
87 sg_id = self._get_param('GroupId')
88
89 if self.is_not_dryrun('DeleteSecurityGroup'):
90 if name:
91 self.ec2_backend.delete_security_group(name)
92 elif sg_id:
93 self.ec2_backend.delete_security_group(group_id=sg_id)
94
95 return DELETE_GROUP_RESPONSE
96
97 def describe_security_groups(self):
98 groupnames = self._get_multi_param("GroupName")
99 group_ids = self._get_multi_param("GroupId")
100 filters = filters_from_querystring(self.querystring)
101
102 groups = self.ec2_backend.describe_security_groups(
103 group_ids=group_ids,
104 groupnames=groupnames,
105 filters=filters
106 )
107
108 template = self.response_template(DESCRIBE_SECURITY_GROUPS_RESPONSE)
109 return template.render(groups=groups)
110
111 def revoke_security_group_egress(self):
112 if self.is_not_dryrun('RevokeSecurityGroupEgress'):
113 for args in self._process_rules_from_querystring():
114 success = self.ec2_backend.revoke_security_group_egress(*args)
115 if not success:
116 return "Could not find a matching egress rule", dict(status=404)
117 return REVOKE_SECURITY_GROUP_EGRESS_RESPONSE
118
119 def revoke_security_group_ingress(self):
120 if self.is_not_dryrun('RevokeSecurityGroupIngress'):
121 for args in self._process_rules_from_querystring():
122 self.ec2_backend.revoke_security_group_ingress(*args)
123 return REVOKE_SECURITY_GROUP_INGRESS_REPONSE
124
125
126 CREATE_SECURITY_GROUP_RESPONSE = """<CreateSecurityGroupResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
127 <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
128 <return>true</return>
129 <groupId>{{ group.id }}</groupId>
130 </CreateSecurityGroupResponse>"""
131
132 DELETE_GROUP_RESPONSE = """<DeleteSecurityGroupResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
133 <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
134 <return>true</return>
135 </DeleteSecurityGroupResponse>"""
136
137 DESCRIBE_SECURITY_GROUPS_RESPONSE = """<DescribeSecurityGroupsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
138 <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
139 <securityGroupInfo>
140 {% for group in groups %}
141 <item>
142 <ownerId>123456789012</ownerId>
143 <groupId>{{ group.id }}</groupId>
144 <groupName>{{ group.name }}</groupName>
145 <groupDescription>{{ group.description }}</groupDescription>
146 {% if group.vpc_id %}
147 <vpcId>{{ group.vpc_id }}</vpcId>
148 {% endif %}
149 <ipPermissions>
150 {% for rule in group.ingress_rules %}
151 <item>
152 <ipProtocol>{{ rule.ip_protocol }}</ipProtocol>
153 {% if rule.from_port %}
154 <fromPort>{{ rule.from_port }}</fromPort>
155 {% endif %}
156 {% if rule.to_port %}
157 <toPort>{{ rule.to_port }}</toPort>
158 {% endif %}
159 <groups>
160 {% for source_group in rule.source_groups %}
161 <item>
162 <userId>123456789012</userId>
163 <groupId>{{ source_group.id }}</groupId>
164 <groupName>{{ source_group.name }}</groupName>
165 </item>
166 {% endfor %}
167 </groups>
168 <ipRanges>
169 {% for ip_range in rule.ip_ranges %}
170 <item>
171 <cidrIp>{{ ip_range }}</cidrIp>
172 </item>
173 {% endfor %}
174 </ipRanges>
175 </item>
176 {% endfor %}
177 </ipPermissions>
178 <ipPermissionsEgress>
179 {% for rule in group.egress_rules %}
180 <item>
181 <ipProtocol>{{ rule.ip_protocol }}</ipProtocol>
182 <fromPort>{{ rule.from_port }}</fromPort>
183 <toPort>{{ rule.to_port }}</toPort>
184 <groups>
185 {% for source_group in rule.source_groups %}
186 <item>
187 <userId>123456789012</userId>
188 <groupId>{{ source_group.id }}</groupId>
189 <groupName>{{ source_group.name }}</groupName>
190 </item>
191 {% endfor %}
192 </groups>
193 <ipRanges>
194 {% for ip_range in rule.ip_ranges %}
195 <item>
196 <cidrIp>{{ ip_range }}</cidrIp>
197 </item>
198 {% endfor %}
199 </ipRanges>
200 </item>
201 {% endfor %}
202 </ipPermissionsEgress>
203 <tagSet>
204 {% for tag in group.get_tags() %}
205 <item>
206 <resourceId>{{ tag.resource_id }}</resourceId>
207 <resourceType>{{ tag.resource_type }}</resourceType>
208 <key>{{ tag.key }}</key>
209 <value>{{ tag.value }}</value>
210 </item>
211 {% endfor %}
212 </tagSet>
213 </item>
214 {% endfor %}
215 </securityGroupInfo>
216 </DescribeSecurityGroupsResponse>"""
217
218 AUTHORIZE_SECURITY_GROUP_INGRESS_REPONSE = """<AuthorizeSecurityGroupIngressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
219 <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
220 <return>true</return>
221 </AuthorizeSecurityGroupIngressResponse>"""
222
223 REVOKE_SECURITY_GROUP_INGRESS_REPONSE = """<RevokeSecurityGroupIngressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
224 <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
225 <return>true</return>
226 </RevokeSecurityGroupIngressResponse>"""
227
228 AUTHORIZE_SECURITY_GROUP_EGRESS_RESPONSE = """
229 <AuthorizeSecurityGroupEgressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
230 <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
231 <return>true</return>
232 </AuthorizeSecurityGroupEgressResponse>"""
233
234 REVOKE_SECURITY_GROUP_EGRESS_RESPONSE = """<RevokeSecurityGroupEgressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
235 <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
236 <return>true</return>
237 </RevokeSecurityGroupEgressResponse>"""
238
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/moto/ec2/responses/security_groups.py b/moto/ec2/responses/security_groups.py
--- a/moto/ec2/responses/security_groups.py
+++ b/moto/ec2/responses/security_groups.py
@@ -11,6 +11,29 @@
return default
+def parse_sg_attributes_from_dict(sg_attributes):
+ ip_protocol = sg_attributes.get('IpProtocol', [None])[0]
+ from_port = sg_attributes.get('FromPort', [None])[0]
+ to_port = sg_attributes.get('ToPort', [None])[0]
+
+ ip_ranges = []
+ ip_ranges_tree = sg_attributes.get('IpRanges') or {}
+ for ip_range_idx in sorted(ip_ranges_tree.keys()):
+ ip_ranges.append(ip_ranges_tree[ip_range_idx]['CidrIp'][0])
+
+ source_groups = []
+ source_group_ids = []
+ groups_tree = sg_attributes.get('Groups') or {}
+ for group_idx in sorted(groups_tree.keys()):
+ group_dict = groups_tree[group_idx]
+ if 'GroupId' in group_dict:
+ source_group_ids.append(group_dict['GroupId'][0])
+ elif 'GroupName' in group_dict:
+ source_groups.append(group_dict['GroupName'][0])
+
+ return ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids
+
+
class SecurityGroups(BaseResponse):
def _process_rules_from_querystring(self):
@@ -29,28 +52,17 @@
d = d[subkey]
d[key_splitted[-1]] = value
+ if 'IpPermissions' not in querytree:
+ # Handle single rule syntax
+ ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids = parse_sg_attributes_from_dict(querytree)
+ yield (group_name_or_id, ip_protocol, from_port, to_port, ip_ranges,
+ source_groups, source_group_ids)
+
ip_permissions = querytree.get('IpPermissions') or {}
for ip_permission_idx in sorted(ip_permissions.keys()):
ip_permission = ip_permissions[ip_permission_idx]
- ip_protocol = ip_permission.get('IpProtocol', [None])[0]
- from_port = ip_permission.get('FromPort', [None])[0]
- to_port = ip_permission.get('ToPort', [None])[0]
-
- ip_ranges = []
- ip_ranges_tree = ip_permission.get('IpRanges') or {}
- for ip_range_idx in sorted(ip_ranges_tree.keys()):
- ip_ranges.append(ip_ranges_tree[ip_range_idx]['CidrIp'][0])
-
- source_groups = []
- source_group_ids = []
- groups_tree = ip_permission.get('Groups') or {}
- for group_idx in sorted(groups_tree.keys()):
- group_dict = groups_tree[group_idx]
- if 'GroupId' in group_dict:
- source_group_ids.append(group_dict['GroupId'][0])
- elif 'GroupName' in group_dict:
- source_groups.append(group_dict['GroupName'][0])
+ ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids = parse_sg_attributes_from_dict(ip_permission)
yield (group_name_or_id, ip_protocol, from_port, to_port, ip_ranges,
source_groups, source_group_ids)
|
{"golden_diff": "diff --git a/moto/ec2/responses/security_groups.py b/moto/ec2/responses/security_groups.py\n--- a/moto/ec2/responses/security_groups.py\n+++ b/moto/ec2/responses/security_groups.py\n@@ -11,6 +11,29 @@\n return default\n \n \n+def parse_sg_attributes_from_dict(sg_attributes):\n+ ip_protocol = sg_attributes.get('IpProtocol', [None])[0]\n+ from_port = sg_attributes.get('FromPort', [None])[0]\n+ to_port = sg_attributes.get('ToPort', [None])[0]\n+\n+ ip_ranges = []\n+ ip_ranges_tree = sg_attributes.get('IpRanges') or {}\n+ for ip_range_idx in sorted(ip_ranges_tree.keys()):\n+ ip_ranges.append(ip_ranges_tree[ip_range_idx]['CidrIp'][0])\n+\n+ source_groups = []\n+ source_group_ids = []\n+ groups_tree = sg_attributes.get('Groups') or {}\n+ for group_idx in sorted(groups_tree.keys()):\n+ group_dict = groups_tree[group_idx]\n+ if 'GroupId' in group_dict:\n+ source_group_ids.append(group_dict['GroupId'][0])\n+ elif 'GroupName' in group_dict:\n+ source_groups.append(group_dict['GroupName'][0])\n+\n+ return ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids\n+\n+\n class SecurityGroups(BaseResponse):\n \n def _process_rules_from_querystring(self):\n@@ -29,28 +52,17 @@\n d = d[subkey]\n d[key_splitted[-1]] = value\n \n+ if 'IpPermissions' not in querytree:\n+ # Handle single rule syntax\n+ ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids = parse_sg_attributes_from_dict(querytree)\n+ yield (group_name_or_id, ip_protocol, from_port, to_port, ip_ranges,\n+ source_groups, source_group_ids)\n+\n ip_permissions = querytree.get('IpPermissions') or {}\n for ip_permission_idx in sorted(ip_permissions.keys()):\n ip_permission = ip_permissions[ip_permission_idx]\n \n- ip_protocol = ip_permission.get('IpProtocol', [None])[0]\n- from_port = ip_permission.get('FromPort', [None])[0]\n- to_port = ip_permission.get('ToPort', [None])[0]\n-\n- ip_ranges = []\n- ip_ranges_tree = ip_permission.get('IpRanges') or {}\n- for ip_range_idx in sorted(ip_ranges_tree.keys()):\n- ip_ranges.append(ip_ranges_tree[ip_range_idx]['CidrIp'][0])\n-\n- source_groups = []\n- source_group_ids = []\n- groups_tree = ip_permission.get('Groups') or {}\n- for group_idx in sorted(groups_tree.keys()):\n- group_dict = groups_tree[group_idx]\n- if 'GroupId' in group_dict:\n- source_group_ids.append(group_dict['GroupId'][0])\n- elif 'GroupName' in group_dict:\n- source_groups.append(group_dict['GroupName'][0])\n+ ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids = parse_sg_attributes_from_dict(ip_permission)\n \n yield (group_name_or_id, ip_protocol, from_port, to_port, ip_ranges,\n source_groups, source_group_ids)\n", "issue": "Authorize Security Group Ingress Only Works with Multi-Rule?\nNear as I can tell, I should be able to use a simplified form of authorizing ingress when I only need a single rule; but this doesn't seem to work with Moto. 
The multi-rule syntax does work, on the other hand.\r\n\r\nSee these tests:\r\n\r\n```\r\nimport boto3\r\nimport pytest\r\n\r\nfrom moto import mock_ec2\r\n\r\n@mock_ec2\r\ndef test_security_group_ingress_succeeds():\r\n ec2 = boto3.resource('ec2', 'ca-central-1')\r\n sg = ec2.create_security_group(Description='Test SG',GroupName='test-sg')\r\n\r\n assert len(sg.ip_permissions) == 0\r\n sg.authorize_ingress(IpPermissions=[\r\n {\r\n 'FromPort': 22,\r\n 'ToPort': 22,\r\n 'IpProtocol': 'tcp',\r\n 'IpRanges': [\r\n {\r\n 'CidrIp': '192.168.0.1/32'\r\n }\r\n ]\r\n }\r\n ])\r\n\r\n assert len(sg.ip_permissions) == 1\r\n\r\n sg_after = ec2.SecurityGroup(sg.id)\r\n assert len(sg_after.ip_permissions) == 1\r\n\r\n\r\n@mock_ec2\r\ndef test_security_group_ingress_fails_without_multirule():\r\n ec2 = boto3.resource('ec2', 'ca-central-1')\r\n sg = ec2.create_security_group(Description='Test SG', GroupName='test-sg')\r\n\r\n assert len(sg.ip_permissions) == 0\r\n sg.authorize_ingress(CidrIp='192.168.0.1/32', FromPort=22, ToPort=22, IpProtocol='tcp')\r\n\r\n # Fails\r\n assert len(sg.ip_permissions) == 1\r\n\r\n\r\n@mock_ec2\r\ndef test_security_group_ingress_fails_without_multirule_after_reload():\r\n ec2 = boto3.resource('ec2', 'ca-central-1')\r\n sg = ec2.create_security_group(Description='Test SG', GroupName='test-sg')\r\n\r\n assert len(sg.ip_permissions) == 0\r\n sg.authorize_ingress(CidrIp='192.168.0.1/32', FromPort=22, ToPort=22, IpProtocol='tcp')\r\n\r\n # Also Fails\r\n sg_after = ec2.SecurityGroup(sg.id)\r\n assert len(sg_after.ip_permissions) == 1\r\n```\r\n\r\nThe first test, using the multi-rule syntax with the `IpPermission` array, works fine.\r\n\r\nThe second two tests fail. AFAIK, this syntax is valid, but doesn't work with moto.\r\n\r\nI expected all three tests to pass, but they don't. Am I doing something wrong, or is this a Moto defect?\r\n\r\nUsing moto 1.2.0, installed with pipenv, using python mocks. 
Both version 1.6.6, installed the same way.\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom moto.core.responses import BaseResponse\nfrom moto.ec2.utils import filters_from_querystring\n\n\ndef try_parse_int(value, default=None):\n try:\n return int(value)\n except (TypeError, ValueError):\n return default\n\n\nclass SecurityGroups(BaseResponse):\n\n def _process_rules_from_querystring(self):\n group_name_or_id = (self._get_param('GroupName') or\n self._get_param('GroupId'))\n\n querytree = {}\n for key, value in self.querystring.items():\n key_splitted = key.split('.')\n key_splitted = [try_parse_int(e, e) for e in key_splitted]\n\n d = querytree\n for subkey in key_splitted[:-1]:\n if subkey not in d:\n d[subkey] = {}\n d = d[subkey]\n d[key_splitted[-1]] = value\n\n ip_permissions = querytree.get('IpPermissions') or {}\n for ip_permission_idx in sorted(ip_permissions.keys()):\n ip_permission = ip_permissions[ip_permission_idx]\n\n ip_protocol = ip_permission.get('IpProtocol', [None])[0]\n from_port = ip_permission.get('FromPort', [None])[0]\n to_port = ip_permission.get('ToPort', [None])[0]\n\n ip_ranges = []\n ip_ranges_tree = ip_permission.get('IpRanges') or {}\n for ip_range_idx in sorted(ip_ranges_tree.keys()):\n ip_ranges.append(ip_ranges_tree[ip_range_idx]['CidrIp'][0])\n\n source_groups = []\n source_group_ids = []\n groups_tree = ip_permission.get('Groups') or {}\n for group_idx in sorted(groups_tree.keys()):\n group_dict = groups_tree[group_idx]\n if 'GroupId' in group_dict:\n source_group_ids.append(group_dict['GroupId'][0])\n elif 'GroupName' in group_dict:\n source_groups.append(group_dict['GroupName'][0])\n\n yield (group_name_or_id, ip_protocol, from_port, to_port, ip_ranges,\n source_groups, source_group_ids)\n\n def authorize_security_group_egress(self):\n if self.is_not_dryrun('GrantSecurityGroupEgress'):\n for args in self._process_rules_from_querystring():\n self.ec2_backend.authorize_security_group_egress(*args)\n return AUTHORIZE_SECURITY_GROUP_EGRESS_RESPONSE\n\n def authorize_security_group_ingress(self):\n if self.is_not_dryrun('GrantSecurityGroupIngress'):\n for args in self._process_rules_from_querystring():\n self.ec2_backend.authorize_security_group_ingress(*args)\n return AUTHORIZE_SECURITY_GROUP_INGRESS_REPONSE\n\n def create_security_group(self):\n name = self._get_param('GroupName')\n description = self._get_param('GroupDescription')\n vpc_id = self._get_param('VpcId')\n\n if self.is_not_dryrun('CreateSecurityGroup'):\n group = self.ec2_backend.create_security_group(\n name, description, vpc_id=vpc_id)\n template = self.response_template(CREATE_SECURITY_GROUP_RESPONSE)\n return template.render(group=group)\n\n def delete_security_group(self):\n # TODO this should raise an error if there are instances in the group.\n # See\n # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteSecurityGroup.html\n\n name = self._get_param('GroupName')\n sg_id = self._get_param('GroupId')\n\n if self.is_not_dryrun('DeleteSecurityGroup'):\n if name:\n self.ec2_backend.delete_security_group(name)\n elif sg_id:\n self.ec2_backend.delete_security_group(group_id=sg_id)\n\n return DELETE_GROUP_RESPONSE\n\n def describe_security_groups(self):\n groupnames = self._get_multi_param(\"GroupName\")\n group_ids = self._get_multi_param(\"GroupId\")\n filters = filters_from_querystring(self.querystring)\n\n groups = self.ec2_backend.describe_security_groups(\n group_ids=group_ids,\n groupnames=groupnames,\n 
filters=filters\n )\n\n template = self.response_template(DESCRIBE_SECURITY_GROUPS_RESPONSE)\n return template.render(groups=groups)\n\n def revoke_security_group_egress(self):\n if self.is_not_dryrun('RevokeSecurityGroupEgress'):\n for args in self._process_rules_from_querystring():\n success = self.ec2_backend.revoke_security_group_egress(*args)\n if not success:\n return \"Could not find a matching egress rule\", dict(status=404)\n return REVOKE_SECURITY_GROUP_EGRESS_RESPONSE\n\n def revoke_security_group_ingress(self):\n if self.is_not_dryrun('RevokeSecurityGroupIngress'):\n for args in self._process_rules_from_querystring():\n self.ec2_backend.revoke_security_group_ingress(*args)\n return REVOKE_SECURITY_GROUP_INGRESS_REPONSE\n\n\nCREATE_SECURITY_GROUP_RESPONSE = \"\"\"<CreateSecurityGroupResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-10-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n <groupId>{{ group.id }}</groupId>\n</CreateSecurityGroupResponse>\"\"\"\n\nDELETE_GROUP_RESPONSE = \"\"\"<DeleteSecurityGroupResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-10-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n</DeleteSecurityGroupResponse>\"\"\"\n\nDESCRIBE_SECURITY_GROUPS_RESPONSE = \"\"\"<DescribeSecurityGroupsResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-10-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <securityGroupInfo>\n {% for group in groups %}\n <item>\n <ownerId>123456789012</ownerId>\n <groupId>{{ group.id }}</groupId>\n <groupName>{{ group.name }}</groupName>\n <groupDescription>{{ group.description }}</groupDescription>\n {% if group.vpc_id %}\n <vpcId>{{ group.vpc_id }}</vpcId>\n {% endif %}\n <ipPermissions>\n {% for rule in group.ingress_rules %}\n <item>\n <ipProtocol>{{ rule.ip_protocol }}</ipProtocol>\n {% if rule.from_port %}\n <fromPort>{{ rule.from_port }}</fromPort>\n {% endif %}\n {% if rule.to_port %}\n <toPort>{{ rule.to_port }}</toPort>\n {% endif %}\n <groups>\n {% for source_group in rule.source_groups %}\n <item>\n <userId>123456789012</userId>\n <groupId>{{ source_group.id }}</groupId>\n <groupName>{{ source_group.name }}</groupName>\n </item>\n {% endfor %}\n </groups>\n <ipRanges>\n {% for ip_range in rule.ip_ranges %}\n <item>\n <cidrIp>{{ ip_range }}</cidrIp>\n </item>\n {% endfor %}\n </ipRanges>\n </item>\n {% endfor %}\n </ipPermissions>\n <ipPermissionsEgress>\n {% for rule in group.egress_rules %}\n <item>\n <ipProtocol>{{ rule.ip_protocol }}</ipProtocol>\n <fromPort>{{ rule.from_port }}</fromPort>\n <toPort>{{ rule.to_port }}</toPort>\n <groups>\n {% for source_group in rule.source_groups %}\n <item>\n <userId>123456789012</userId>\n <groupId>{{ source_group.id }}</groupId>\n <groupName>{{ source_group.name }}</groupName>\n </item>\n {% endfor %}\n </groups>\n <ipRanges>\n {% for ip_range in rule.ip_ranges %}\n <item>\n <cidrIp>{{ ip_range }}</cidrIp>\n </item>\n {% endfor %}\n </ipRanges>\n </item>\n {% endfor %}\n </ipPermissionsEgress>\n <tagSet>\n {% for tag in group.get_tags() %}\n <item>\n <resourceId>{{ tag.resource_id }}</resourceId>\n <resourceType>{{ tag.resource_type }}</resourceType>\n <key>{{ tag.key }}</key>\n <value>{{ tag.value }}</value>\n </item>\n {% endfor %}\n </tagSet>\n </item>\n {% endfor %}\n </securityGroupInfo>\n</DescribeSecurityGroupsResponse>\"\"\"\n\nAUTHORIZE_SECURITY_GROUP_INGRESS_REPONSE = \"\"\"<AuthorizeSecurityGroupIngressResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-10-15/\">\n 
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n</AuthorizeSecurityGroupIngressResponse>\"\"\"\n\nREVOKE_SECURITY_GROUP_INGRESS_REPONSE = \"\"\"<RevokeSecurityGroupIngressResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-10-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n</RevokeSecurityGroupIngressResponse>\"\"\"\n\nAUTHORIZE_SECURITY_GROUP_EGRESS_RESPONSE = \"\"\"\n<AuthorizeSecurityGroupEgressResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-10-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n</AuthorizeSecurityGroupEgressResponse>\"\"\"\n\nREVOKE_SECURITY_GROUP_EGRESS_RESPONSE = \"\"\"<RevokeSecurityGroupEgressResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-10-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n</RevokeSecurityGroupEgressResponse>\"\"\"\n", "path": "moto/ec2/responses/security_groups.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom moto.core.responses import BaseResponse\nfrom moto.ec2.utils import filters_from_querystring\n\n\ndef try_parse_int(value, default=None):\n try:\n return int(value)\n except (TypeError, ValueError):\n return default\n\n\ndef parse_sg_attributes_from_dict(sg_attributes):\n ip_protocol = sg_attributes.get('IpProtocol', [None])[0]\n from_port = sg_attributes.get('FromPort', [None])[0]\n to_port = sg_attributes.get('ToPort', [None])[0]\n\n ip_ranges = []\n ip_ranges_tree = sg_attributes.get('IpRanges') or {}\n for ip_range_idx in sorted(ip_ranges_tree.keys()):\n ip_ranges.append(ip_ranges_tree[ip_range_idx]['CidrIp'][0])\n\n source_groups = []\n source_group_ids = []\n groups_tree = sg_attributes.get('Groups') or {}\n for group_idx in sorted(groups_tree.keys()):\n group_dict = groups_tree[group_idx]\n if 'GroupId' in group_dict:\n source_group_ids.append(group_dict['GroupId'][0])\n elif 'GroupName' in group_dict:\n source_groups.append(group_dict['GroupName'][0])\n\n return ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids\n\n\nclass SecurityGroups(BaseResponse):\n\n def _process_rules_from_querystring(self):\n group_name_or_id = (self._get_param('GroupName') or\n self._get_param('GroupId'))\n\n querytree = {}\n for key, value in self.querystring.items():\n key_splitted = key.split('.')\n key_splitted = [try_parse_int(e, e) for e in key_splitted]\n\n d = querytree\n for subkey in key_splitted[:-1]:\n if subkey not in d:\n d[subkey] = {}\n d = d[subkey]\n d[key_splitted[-1]] = value\n\n if 'IpPermissions' not in querytree:\n # Handle single rule syntax\n ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids = parse_sg_attributes_from_dict(querytree)\n yield (group_name_or_id, ip_protocol, from_port, to_port, ip_ranges,\n source_groups, source_group_ids)\n\n ip_permissions = querytree.get('IpPermissions') or {}\n for ip_permission_idx in sorted(ip_permissions.keys()):\n ip_permission = ip_permissions[ip_permission_idx]\n\n ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids = parse_sg_attributes_from_dict(ip_permission)\n\n yield (group_name_or_id, ip_protocol, from_port, to_port, ip_ranges,\n source_groups, source_group_ids)\n\n def authorize_security_group_egress(self):\n if self.is_not_dryrun('GrantSecurityGroupEgress'):\n for args in self._process_rules_from_querystring():\n self.ec2_backend.authorize_security_group_egress(*args)\n return 
AUTHORIZE_SECURITY_GROUP_EGRESS_RESPONSE\n\n def authorize_security_group_ingress(self):\n if self.is_not_dryrun('GrantSecurityGroupIngress'):\n for args in self._process_rules_from_querystring():\n self.ec2_backend.authorize_security_group_ingress(*args)\n return AUTHORIZE_SECURITY_GROUP_INGRESS_REPONSE\n\n def create_security_group(self):\n name = self._get_param('GroupName')\n description = self._get_param('GroupDescription')\n vpc_id = self._get_param('VpcId')\n\n if self.is_not_dryrun('CreateSecurityGroup'):\n group = self.ec2_backend.create_security_group(\n name, description, vpc_id=vpc_id)\n template = self.response_template(CREATE_SECURITY_GROUP_RESPONSE)\n return template.render(group=group)\n\n def delete_security_group(self):\n # TODO this should raise an error if there are instances in the group.\n # See\n # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteSecurityGroup.html\n\n name = self._get_param('GroupName')\n sg_id = self._get_param('GroupId')\n\n if self.is_not_dryrun('DeleteSecurityGroup'):\n if name:\n self.ec2_backend.delete_security_group(name)\n elif sg_id:\n self.ec2_backend.delete_security_group(group_id=sg_id)\n\n return DELETE_GROUP_RESPONSE\n\n def describe_security_groups(self):\n groupnames = self._get_multi_param(\"GroupName\")\n group_ids = self._get_multi_param(\"GroupId\")\n filters = filters_from_querystring(self.querystring)\n\n groups = self.ec2_backend.describe_security_groups(\n group_ids=group_ids,\n groupnames=groupnames,\n filters=filters\n )\n\n template = self.response_template(DESCRIBE_SECURITY_GROUPS_RESPONSE)\n return template.render(groups=groups)\n\n def revoke_security_group_egress(self):\n if self.is_not_dryrun('RevokeSecurityGroupEgress'):\n for args in self._process_rules_from_querystring():\n success = self.ec2_backend.revoke_security_group_egress(*args)\n if not success:\n return \"Could not find a matching egress rule\", dict(status=404)\n return REVOKE_SECURITY_GROUP_EGRESS_RESPONSE\n\n def revoke_security_group_ingress(self):\n if self.is_not_dryrun('RevokeSecurityGroupIngress'):\n for args in self._process_rules_from_querystring():\n self.ec2_backend.revoke_security_group_ingress(*args)\n return REVOKE_SECURITY_GROUP_INGRESS_REPONSE\n\n\nCREATE_SECURITY_GROUP_RESPONSE = \"\"\"<CreateSecurityGroupResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-10-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n <groupId>{{ group.id }}</groupId>\n</CreateSecurityGroupResponse>\"\"\"\n\nDELETE_GROUP_RESPONSE = \"\"\"<DeleteSecurityGroupResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-10-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n</DeleteSecurityGroupResponse>\"\"\"\n\nDESCRIBE_SECURITY_GROUPS_RESPONSE = \"\"\"<DescribeSecurityGroupsResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-10-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <securityGroupInfo>\n {% for group in groups %}\n <item>\n <ownerId>123456789012</ownerId>\n <groupId>{{ group.id }}</groupId>\n <groupName>{{ group.name }}</groupName>\n <groupDescription>{{ group.description }}</groupDescription>\n {% if group.vpc_id %}\n <vpcId>{{ group.vpc_id }}</vpcId>\n {% endif %}\n <ipPermissions>\n {% for rule in group.ingress_rules %}\n <item>\n <ipProtocol>{{ rule.ip_protocol }}</ipProtocol>\n {% if rule.from_port %}\n <fromPort>{{ rule.from_port }}</fromPort>\n {% endif %}\n {% if rule.to_port %}\n <toPort>{{ rule.to_port }}</toPort>\n {% 
endif %}\n <groups>\n {% for source_group in rule.source_groups %}\n <item>\n <userId>123456789012</userId>\n <groupId>{{ source_group.id }}</groupId>\n <groupName>{{ source_group.name }}</groupName>\n </item>\n {% endfor %}\n </groups>\n <ipRanges>\n {% for ip_range in rule.ip_ranges %}\n <item>\n <cidrIp>{{ ip_range }}</cidrIp>\n </item>\n {% endfor %}\n </ipRanges>\n </item>\n {% endfor %}\n </ipPermissions>\n <ipPermissionsEgress>\n {% for rule in group.egress_rules %}\n <item>\n <ipProtocol>{{ rule.ip_protocol }}</ipProtocol>\n <fromPort>{{ rule.from_port }}</fromPort>\n <toPort>{{ rule.to_port }}</toPort>\n <groups>\n {% for source_group in rule.source_groups %}\n <item>\n <userId>123456789012</userId>\n <groupId>{{ source_group.id }}</groupId>\n <groupName>{{ source_group.name }}</groupName>\n </item>\n {% endfor %}\n </groups>\n <ipRanges>\n {% for ip_range in rule.ip_ranges %}\n <item>\n <cidrIp>{{ ip_range }}</cidrIp>\n </item>\n {% endfor %}\n </ipRanges>\n </item>\n {% endfor %}\n </ipPermissionsEgress>\n <tagSet>\n {% for tag in group.get_tags() %}\n <item>\n <resourceId>{{ tag.resource_id }}</resourceId>\n <resourceType>{{ tag.resource_type }}</resourceType>\n <key>{{ tag.key }}</key>\n <value>{{ tag.value }}</value>\n </item>\n {% endfor %}\n </tagSet>\n </item>\n {% endfor %}\n </securityGroupInfo>\n</DescribeSecurityGroupsResponse>\"\"\"\n\nAUTHORIZE_SECURITY_GROUP_INGRESS_REPONSE = \"\"\"<AuthorizeSecurityGroupIngressResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-10-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n</AuthorizeSecurityGroupIngressResponse>\"\"\"\n\nREVOKE_SECURITY_GROUP_INGRESS_REPONSE = \"\"\"<RevokeSecurityGroupIngressResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-10-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n</RevokeSecurityGroupIngressResponse>\"\"\"\n\nAUTHORIZE_SECURITY_GROUP_EGRESS_RESPONSE = \"\"\"\n<AuthorizeSecurityGroupEgressResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-10-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n</AuthorizeSecurityGroupEgressResponse>\"\"\"\n\nREVOKE_SECURITY_GROUP_EGRESS_RESPONSE = \"\"\"<RevokeSecurityGroupEgressResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-10-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n</RevokeSecurityGroupEgressResponse>\"\"\"\n", "path": "moto/ec2/responses/security_groups.py"}]}
| 3,742 | 721 |
gh_patches_debug_18313
|
rasdani/github-patches
|
git_diff
|
docker__docker-py-575
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
docker.utils.decorators.check_resource problem
When use `docker_client.start(**kwargs)` to start the container, will be raise follow exception:
```
c.start(**s_kwargs)
File "/home/simplecloud/shiyanlou/env/local/lib/python2.7/site-packages/docker/utils/decorators.py", line 12, in wrapped
return f(self, resource_id, *args, **kwargs)
TypeError: start() got multiple values for keyword argument 'container'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/utils/decorators.py`
Content:
```
1 from .. import errors
2
3
4 def check_resource(f):
5 def wrapped(self, resource_id=None, *args, **kwargs):
6 if resource_id is None and (
7 kwargs.get('container') is None and kwargs.get('image') is None
8 ):
9 raise errors.NullResource(
10 'image or container param is None'
11 )
12 return f(self, resource_id, *args, **kwargs)
13 return wrapped
14
```
Path: `docker/version.py`
Content:
```
1 version = "1.2.1-dev"
2 version_info = tuple([int(d) for d in version.replace("-dev", "").split(".")])
3
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py
--- a/docker/utils/decorators.py
+++ b/docker/utils/decorators.py
@@ -3,11 +3,14 @@
def check_resource(f):
def wrapped(self, resource_id=None, *args, **kwargs):
- if resource_id is None and (
- kwargs.get('container') is None and kwargs.get('image') is None
- ):
- raise errors.NullResource(
- 'image or container param is None'
- )
+ if resource_id is None:
+ if kwargs.get('container'):
+ resource_id = kwargs.pop('container')
+ elif kwargs.get('image'):
+ resource_id = kwargs.pop('image')
+ else:
+ raise errors.NullResource(
+ 'image or container param is undefined'
+ )
return f(self, resource_id, *args, **kwargs)
return wrapped
diff --git a/docker/version.py b/docker/version.py
--- a/docker/version.py
+++ b/docker/version.py
@@ -1,2 +1,2 @@
-version = "1.2.1-dev"
+version = "1.2.1"
version_info = tuple([int(d) for d in version.replace("-dev", "").split(".")])
|
{"golden_diff": "diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py\n--- a/docker/utils/decorators.py\n+++ b/docker/utils/decorators.py\n@@ -3,11 +3,14 @@\n \n def check_resource(f):\n def wrapped(self, resource_id=None, *args, **kwargs):\n- if resource_id is None and (\n- kwargs.get('container') is None and kwargs.get('image') is None\n- ):\n- raise errors.NullResource(\n- 'image or container param is None'\n- )\n+ if resource_id is None:\n+ if kwargs.get('container'):\n+ resource_id = kwargs.pop('container')\n+ elif kwargs.get('image'):\n+ resource_id = kwargs.pop('image')\n+ else:\n+ raise errors.NullResource(\n+ 'image or container param is undefined'\n+ )\n return f(self, resource_id, *args, **kwargs)\n return wrapped\ndiff --git a/docker/version.py b/docker/version.py\n--- a/docker/version.py\n+++ b/docker/version.py\n@@ -1,2 +1,2 @@\n-version = \"1.2.1-dev\"\n+version = \"1.2.1\"\n version_info = tuple([int(d) for d in version.replace(\"-dev\", \"\").split(\".\")])\n", "issue": "docker.utils.decorators.check_resource problem\nWhen use `docker_client.start(**kwargs)` to start the container, will be raise follow exception:\n\n```\nc.start(**s_kwargs)\n File \"/home/simplecloud/shiyanlou/env/local/lib/python2.7/site-packages/docker/utils/decorators.py\", line 12, in wrapped\n return f(self, resource_id, *args, **kwargs)\nTypeError: start() got multiple values for keyword argument 'container'\n```\n\n", "before_files": [{"content": "from .. import errors\n\n\ndef check_resource(f):\n def wrapped(self, resource_id=None, *args, **kwargs):\n if resource_id is None and (\n kwargs.get('container') is None and kwargs.get('image') is None\n ):\n raise errors.NullResource(\n 'image or container param is None'\n )\n return f(self, resource_id, *args, **kwargs)\n return wrapped\n", "path": "docker/utils/decorators.py"}, {"content": "version = \"1.2.1-dev\"\nversion_info = tuple([int(d) for d in version.replace(\"-dev\", \"\").split(\".\")])\n", "path": "docker/version.py"}], "after_files": [{"content": "from .. import errors\n\n\ndef check_resource(f):\n def wrapped(self, resource_id=None, *args, **kwargs):\n if resource_id is None:\n if kwargs.get('container'):\n resource_id = kwargs.pop('container')\n elif kwargs.get('image'):\n resource_id = kwargs.pop('image')\n else:\n raise errors.NullResource(\n 'image or container param is undefined'\n )\n return f(self, resource_id, *args, **kwargs)\n return wrapped\n", "path": "docker/utils/decorators.py"}, {"content": "version = \"1.2.1\"\nversion_info = tuple([int(d) for d in version.replace(\"-dev\", \"\").split(\".\")])\n", "path": "docker/version.py"}]}
| 514 | 279 |
gh_patches_debug_25492
|
rasdani/github-patches
|
git_diff
|
sql-machine-learning__elasticdl-643
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create owner references among master and worker pods
Currently we have the following two ways to delete master + worker pods:
* Delete each pod one by one
* Delete all pods related to this elasticdl run via `elasticdl_job_name` label `kubectl delete pod -l elasticdl_job_name=test-job-1559292773-93`
It would be much easier if users could just delete master pod and then worker pods can be delete automatically. This would be possible if there are owner references among master and worker pods.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/elasticdl/master/k8s_client.py`
Content:
```
1 import logging
2 import os
3 import threading
4 import traceback
5
6 from kubernetes import client, config, watch
7 from kubernetes.client import (
8 V1PersistentVolumeClaimVolumeSource as pvcVolumeSource,
9 )
10
11 WORKER_POD_NAME_PREFIX = "elasticdl-worker-"
12
13
14 class Client(object):
15 def __init__(self, *, worker_image, namespace, job_name, event_callback):
16 """
17 ElasticDL k8s client.
18
19 Args:
20 worker_image: Docker image path for ElasticDL workers.
21 namespace: k8s namespace for ElasticDL pods.
22 job_name: ElasticDL job name, should be unique in the namespace.
23 Used as worker pod name prefix and value for "elasticdl" label.
24 event_callback: If not None, an event watcher will be created and
25 events passed to the callback.
26 """
27 if os.getenv("KUBERNETES_SERVICE_HOST"):
28 # We are running inside k8s
29 config.load_incluster_config()
30 else:
31 # Use user's kube config
32 config.load_kube_config()
33
34 self._v1 = client.CoreV1Api()
35 self._logger = logging.getLogger(__name__)
36 self._image = worker_image
37 self._ns = namespace
38 self._job_name = job_name
39 self._event_cb = event_callback
40 if self._event_cb:
41 threading.Thread(
42 target=self._watch, name="event_watcher", daemon=True
43 ).start()
44
45 def _watch(self):
46 stream = watch.Watch().stream(
47 self._v1.list_namespaced_pod,
48 self._ns,
49 label_selector="elasticdl_job_name=" + self._job_name,
50 )
51 for event in stream:
52 try:
53 self._event_cb(event)
54 except Exception:
55 traceback.print_exc()
56
57 def get_worker_pod_name(self, worker_id):
58 return WORKER_POD_NAME_PREFIX + self._job_name + "-" + str(worker_id)
59
60 def _create_worker_pod(
61 self,
62 worker_id,
63 resource_requests,
64 resource_limits,
65 priority,
66 mount_path,
67 volume_name,
68 image_pull_policy,
69 command,
70 args,
71 restart_policy,
72 ):
73 # Worker container config
74 container = client.V1Container(
75 name=self.get_worker_pod_name(worker_id),
76 image=self._image,
77 command=command,
78 resources=client.V1ResourceRequirements(
79 requests=resource_requests, limits=resource_limits
80 ),
81 image_pull_policy=image_pull_policy,
82 args=args,
83 )
84
85 # Pod
86 spec = client.V1PodSpec(
87 containers=[container], restart_policy=restart_policy
88 )
89
90 # Mount data path
91 if mount_path is not None and volume_name is not None:
92 volume = client.V1Volume(
93 name="data-volume",
94 persistent_volume_claim=pvcVolumeSource(
95 claim_name="fileserver-claim", read_only=False
96 ),
97 )
98 spec.volumes = [volume]
99 container.volume_mounts = [
100 client.V1VolumeMount(name=volume_name, mount_path=mount_path)
101 ]
102
103 if priority is not None:
104 spec.priority_class_name = priority
105
106 pod = client.V1Pod(
107 spec=spec,
108 metadata=client.V1ObjectMeta(
109 name=self.get_worker_pod_name(worker_id),
110 labels={
111 "app": "elasticdl",
112 "elasticdl_job_name": self._job_name,
113 },
114 ),
115 )
116 return pod
117
118 def create_worker(
119 self,
120 worker_id,
121 resource_requests,
122 resource_limits,
123 priority=None,
124 mount_path=None,
125 volume_name=None,
126 image_pull_policy=None,
127 command=None,
128 args=None,
129 restart_policy="OnFailure",
130 ):
131 self._logger.info("Creating worker: " + str(worker_id))
132 pod = self._create_worker_pod(
133 worker_id,
134 resource_requests,
135 resource_limits,
136 priority,
137 mount_path,
138 volume_name,
139 image_pull_policy,
140 command=command,
141 args=args,
142 restart_policy=restart_policy,
143 )
144 return self._v1.create_namespaced_pod(self._ns, pod)
145
146 def delete_worker(self, worker_id):
147 self._logger.info("Deleting worker: " + str(worker_id))
148 self._v1.delete_namespaced_pod(
149 self.get_worker_pod_name(worker_id),
150 self._ns,
151 body=client.V1DeleteOptions(grace_period_seconds=0),
152 )
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/elasticdl/python/elasticdl/master/k8s_client.py b/elasticdl/python/elasticdl/master/k8s_client.py
--- a/elasticdl/python/elasticdl/master/k8s_client.py
+++ b/elasticdl/python/elasticdl/master/k8s_client.py
@@ -103,6 +103,16 @@
if priority is not None:
spec.priority_class_name = priority
+ # Find that master pod that will be used as the owner reference
+ # for this worker pod.
+ pods = self._v1.list_namespaced_pod(
+ namespace=self._ns,
+ label_selector="elasticdl_job_name=" + self._job_name
+ ).items
+ master_pod = [pod for pod in pods if (
+ pod.metadata.name == "elasticdl-master-" + self._job_name
+ )][0]
+
pod = client.V1Pod(
spec=spec,
metadata=client.V1ObjectMeta(
@@ -111,6 +121,17 @@
"app": "elasticdl",
"elasticdl_job_name": self._job_name,
},
+ # TODO: Add tests for this once we've done refactoring on
+ # k8s client code and the constant strings
+ owner_references=[
+ client.V1OwnerReference(
+ api_version="v1",
+ block_owner_deletion=True,
+ kind="Pod",
+ name=master_pod.metadata.name,
+ uid=master_pod.metadata.uid,
+ ),
+ ],
),
)
return pod
|
{"golden_diff": "diff --git a/elasticdl/python/elasticdl/master/k8s_client.py b/elasticdl/python/elasticdl/master/k8s_client.py\n--- a/elasticdl/python/elasticdl/master/k8s_client.py\n+++ b/elasticdl/python/elasticdl/master/k8s_client.py\n@@ -103,6 +103,16 @@\n if priority is not None:\n spec.priority_class_name = priority\n \n+ # Find that master pod that will be used as the owner reference\n+ # for this worker pod.\n+ pods = self._v1.list_namespaced_pod(\n+ namespace=self._ns,\n+ label_selector=\"elasticdl_job_name=\" + self._job_name\n+ ).items\n+ master_pod = [pod for pod in pods if (\n+ pod.metadata.name == \"elasticdl-master-\" + self._job_name\n+ )][0]\n+\n pod = client.V1Pod(\n spec=spec,\n metadata=client.V1ObjectMeta(\n@@ -111,6 +121,17 @@\n \"app\": \"elasticdl\",\n \"elasticdl_job_name\": self._job_name,\n },\n+ # TODO: Add tests for this once we've done refactoring on\n+ # k8s client code and the constant strings\n+ owner_references=[\n+ client.V1OwnerReference(\n+ api_version=\"v1\",\n+ block_owner_deletion=True,\n+ kind=\"Pod\",\n+ name=master_pod.metadata.name,\n+ uid=master_pod.metadata.uid,\n+ ),\n+ ],\n ),\n )\n return pod\n", "issue": "Create owner references among master and worker pods\nCurrently we have the following two ways to delete master + worker pods:\r\n* Delete each pod one by one\r\n* Delete all pods related to this elasticdl run via `elasticdl_job_name` label `kubectl delete pod -l elasticdl_job_name=test-job-1559292773-93`\r\n\r\nIt would be much easier if users could just delete master pod and then worker pods can be delete automatically. This would be possible if there are owner references among master and worker pods.\n", "before_files": [{"content": "import logging\nimport os\nimport threading\nimport traceback\n\nfrom kubernetes import client, config, watch\nfrom kubernetes.client import (\n V1PersistentVolumeClaimVolumeSource as pvcVolumeSource,\n)\n\nWORKER_POD_NAME_PREFIX = \"elasticdl-worker-\"\n\n\nclass Client(object):\n def __init__(self, *, worker_image, namespace, job_name, event_callback):\n \"\"\"\n ElasticDL k8s client.\n\n Args:\n worker_image: Docker image path for ElasticDL workers.\n namespace: k8s namespace for ElasticDL pods.\n job_name: ElasticDL job name, should be unique in the namespace.\n Used as worker pod name prefix and value for \"elasticdl\" label.\n event_callback: If not None, an event watcher will be created and\n events passed to the callback.\n \"\"\"\n if os.getenv(\"KUBERNETES_SERVICE_HOST\"):\n # We are running inside k8s\n config.load_incluster_config()\n else:\n # Use user's kube config\n config.load_kube_config()\n\n self._v1 = client.CoreV1Api()\n self._logger = logging.getLogger(__name__)\n self._image = worker_image\n self._ns = namespace\n self._job_name = job_name\n self._event_cb = event_callback\n if self._event_cb:\n threading.Thread(\n target=self._watch, name=\"event_watcher\", daemon=True\n ).start()\n\n def _watch(self):\n stream = watch.Watch().stream(\n self._v1.list_namespaced_pod,\n self._ns,\n label_selector=\"elasticdl_job_name=\" + self._job_name,\n )\n for event in stream:\n try:\n self._event_cb(event)\n except Exception:\n traceback.print_exc()\n\n def get_worker_pod_name(self, worker_id):\n return WORKER_POD_NAME_PREFIX + self._job_name + \"-\" + str(worker_id)\n\n def _create_worker_pod(\n self,\n worker_id,\n resource_requests,\n resource_limits,\n priority,\n mount_path,\n volume_name,\n image_pull_policy,\n command,\n args,\n restart_policy,\n ):\n # Worker container config\n container 
= client.V1Container(\n name=self.get_worker_pod_name(worker_id),\n image=self._image,\n command=command,\n resources=client.V1ResourceRequirements(\n requests=resource_requests, limits=resource_limits\n ),\n image_pull_policy=image_pull_policy,\n args=args,\n )\n\n # Pod\n spec = client.V1PodSpec(\n containers=[container], restart_policy=restart_policy\n )\n\n # Mount data path\n if mount_path is not None and volume_name is not None:\n volume = client.V1Volume(\n name=\"data-volume\",\n persistent_volume_claim=pvcVolumeSource(\n claim_name=\"fileserver-claim\", read_only=False\n ),\n )\n spec.volumes = [volume]\n container.volume_mounts = [\n client.V1VolumeMount(name=volume_name, mount_path=mount_path)\n ]\n\n if priority is not None:\n spec.priority_class_name = priority\n\n pod = client.V1Pod(\n spec=spec,\n metadata=client.V1ObjectMeta(\n name=self.get_worker_pod_name(worker_id),\n labels={\n \"app\": \"elasticdl\",\n \"elasticdl_job_name\": self._job_name,\n },\n ),\n )\n return pod\n\n def create_worker(\n self,\n worker_id,\n resource_requests,\n resource_limits,\n priority=None,\n mount_path=None,\n volume_name=None,\n image_pull_policy=None,\n command=None,\n args=None,\n restart_policy=\"OnFailure\",\n ):\n self._logger.info(\"Creating worker: \" + str(worker_id))\n pod = self._create_worker_pod(\n worker_id,\n resource_requests,\n resource_limits,\n priority,\n mount_path,\n volume_name,\n image_pull_policy,\n command=command,\n args=args,\n restart_policy=restart_policy,\n )\n return self._v1.create_namespaced_pod(self._ns, pod)\n\n def delete_worker(self, worker_id):\n self._logger.info(\"Deleting worker: \" + str(worker_id))\n self._v1.delete_namespaced_pod(\n self.get_worker_pod_name(worker_id),\n self._ns,\n body=client.V1DeleteOptions(grace_period_seconds=0),\n )\n", "path": "elasticdl/python/elasticdl/master/k8s_client.py"}], "after_files": [{"content": "import logging\nimport os\nimport threading\nimport traceback\n\nfrom kubernetes import client, config, watch\nfrom kubernetes.client import (\n V1PersistentVolumeClaimVolumeSource as pvcVolumeSource,\n)\n\nWORKER_POD_NAME_PREFIX = \"elasticdl-worker-\"\n\n\nclass Client(object):\n def __init__(self, *, worker_image, namespace, job_name, event_callback):\n \"\"\"\n ElasticDL k8s client.\n\n Args:\n worker_image: Docker image path for ElasticDL workers.\n namespace: k8s namespace for ElasticDL pods.\n job_name: ElasticDL job name, should be unique in the namespace.\n Used as worker pod name prefix and value for \"elasticdl\" label.\n event_callback: If not None, an event watcher will be created and\n events passed to the callback.\n \"\"\"\n if os.getenv(\"KUBERNETES_SERVICE_HOST\"):\n # We are running inside k8s\n config.load_incluster_config()\n else:\n # Use user's kube config\n config.load_kube_config()\n\n self._v1 = client.CoreV1Api()\n self._logger = logging.getLogger(__name__)\n self._image = worker_image\n self._ns = namespace\n self._job_name = job_name\n self._event_cb = event_callback\n if self._event_cb:\n threading.Thread(\n target=self._watch, name=\"event_watcher\", daemon=True\n ).start()\n\n def _watch(self):\n stream = watch.Watch().stream(\n self._v1.list_namespaced_pod,\n self._ns,\n label_selector=\"elasticdl_job_name=\" + self._job_name,\n )\n for event in stream:\n try:\n self._event_cb(event)\n except Exception:\n traceback.print_exc()\n\n def get_worker_pod_name(self, worker_id):\n return WORKER_POD_NAME_PREFIX + self._job_name + \"-\" + str(worker_id)\n\n def _create_worker_pod(\n self,\n 
worker_id,\n resource_requests,\n resource_limits,\n priority,\n mount_path,\n volume_name,\n image_pull_policy,\n command,\n args,\n restart_policy,\n ):\n # Worker container config\n container = client.V1Container(\n name=self.get_worker_pod_name(worker_id),\n image=self._image,\n command=command,\n resources=client.V1ResourceRequirements(\n requests=resource_requests, limits=resource_limits\n ),\n image_pull_policy=image_pull_policy,\n args=args,\n )\n\n # Pod\n spec = client.V1PodSpec(\n containers=[container], restart_policy=restart_policy\n )\n\n # Mount data path\n if mount_path is not None and volume_name is not None:\n volume = client.V1Volume(\n name=\"data-volume\",\n persistent_volume_claim=pvcVolumeSource(\n claim_name=\"fileserver-claim\", read_only=False\n ),\n )\n spec.volumes = [volume]\n container.volume_mounts = [\n client.V1VolumeMount(name=volume_name, mount_path=mount_path)\n ]\n\n if priority is not None:\n spec.priority_class_name = priority\n\n # Find that master pod that will be used as the owner reference\n # for this worker pod.\n pods = self._v1.list_namespaced_pod(\n namespace=self._ns,\n label_selector=\"elasticdl_job_name=\" + self._job_name\n ).items\n master_pod = [pod for pod in pods if (\n pod.metadata.name == \"elasticdl-master-\" + self._job_name\n )][0]\n\n pod = client.V1Pod(\n spec=spec,\n metadata=client.V1ObjectMeta(\n name=self.get_worker_pod_name(worker_id),\n labels={\n \"app\": \"elasticdl\",\n \"elasticdl_job_name\": self._job_name,\n },\n # TODO: Add tests for this once we've done refactoring on\n # k8s client code and the constant strings\n owner_references=[\n client.V1OwnerReference(\n api_version=\"v1\",\n block_owner_deletion=True,\n kind=\"Pod\",\n name=master_pod.metadata.name,\n uid=master_pod.metadata.uid,\n ),\n ],\n ),\n )\n return pod\n\n def create_worker(\n self,\n worker_id,\n resource_requests,\n resource_limits,\n priority=None,\n mount_path=None,\n volume_name=None,\n image_pull_policy=None,\n command=None,\n args=None,\n restart_policy=\"OnFailure\",\n ):\n self._logger.info(\"Creating worker: \" + str(worker_id))\n pod = self._create_worker_pod(\n worker_id,\n resource_requests,\n resource_limits,\n priority,\n mount_path,\n volume_name,\n image_pull_policy,\n command=command,\n args=args,\n restart_policy=restart_policy,\n )\n return self._v1.create_namespaced_pod(self._ns, pod)\n\n def delete_worker(self, worker_id):\n self._logger.info(\"Deleting worker: \" + str(worker_id))\n self._v1.delete_namespaced_pod(\n self.get_worker_pod_name(worker_id),\n self._ns,\n body=client.V1DeleteOptions(grace_period_seconds=0),\n )\n", "path": "elasticdl/python/elasticdl/master/k8s_client.py"}]}
| 1,683 | 352 |
gh_patches_debug_11323
|
rasdani/github-patches
|
git_diff
|
keras-team__keras-19201
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo in keras.distribution.initialize()
Hi,
There is a typo when calling `keras.distribution.initialize` due to a typo in the jax backend. The function pass the `corrdinator_address` argument instead of `coordinator_address` to `jax.distributed.initialize`
```log
---> 13 keras.distribution.initialize()
File /usr/local/lib/python3.10/site-packages/keras/src/distribution/distribution_lib.py:131, in initialize(job_addresses, num_processes, proceed_id)
129 if proceed_id is None and "KERAS_DISTRIBUTION_PROCESS_ID" in os.environ:
130 proceed_id = int(os.environ["KERAS_DISTRIBUTION_PROCESS_ID"])
--> 131 distribution_lib.initialize(job_addresses, num_processes, proceed_id)
File /usr/local/lib/python3.10/site-packages/keras/src/backend/jax/distribution_lib.py:207, in initialize(job_addresses, num_processes, process_id)
204 else:
205 corrdinator_address = job_addresses
--> 207 jax.distributed.initialize(
208 corrdinator_address=corrdinator_address,
209 num_processes=num_processes,
210 process_id=process_id,
211 )
TypeError: initialize() got an unexpected keyword argument 'corrdinator_address'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `keras/backend/jax/distribution_lib.py`
Content:
```
1 """!!!DO NOT USE!!!
2
3 Distribution related class for JAX backend.
4
5 This is just a prototype and we might want to unify it
6 with other backends in the future.
7 """
8
9 import jax
10 import numpy as np
11
12 from keras.utils import jax_utils
13
14
15 def list_devices(device_type=None):
16 """Return all the available devices based on the device type.
17
18 Note that this should return the global devices in a distributed setting.
19
20 Args:
21 device_type: string of `"cpu"`, `"gpu"` or `"tpu"`. Defaults to `"gpu"`
22 or `"tpu"` if available when device_type is not provided. Otherwise
23 will return the `"cpu"` devices.
24
25 Return:
26 List of devices that are available for distribute computation.
27 """
28 device_type = device_type.lower() if device_type else None
29 jax_devices = jax.devices(backend=device_type)
30 return [f"{device.platform}:{device.id}" for device in jax_devices]
31
32
33 def distribute_variable(value, layout):
34 """Create a distributed variable for JAX.
35
36 Since JAX doesn't have a variable class, this will just return a `jax.Array`
37 with the corresponding layout/sharding specified.
38
39 Note that this function should be used in eager context, not in jitted
40 function.
41
42 Args:
43 value: the initial value of the variable.
44 layout: `TensorLayout` for the created variable, or a
45 `jax.sharding.Sharding` instance.
46
47 Returns:
48 jax.Array which is the distributed variable.
49 """
50 if not isinstance(layout, jax.sharding.Sharding):
51 layout = _to_jax_layout(layout)
52 if isinstance(
53 value, (jax.Array, jax.numpy.ndarray)
54 ) and value.sharding.is_equivalent_to(layout, ndim=len(value.shape)):
55 # Skip the relayout if the value is already having the proper sharding
56 return value
57
58 if layout.is_fully_addressable:
59 return jax.device_put(value, layout)
60 else:
61 # Need to only distribute the value to local addressible devices, and
62 # repack them back into global format.
63 mapping = layout.addressable_devices_indices_map(value.shape)
64 local_values = jax.device_put(
65 [value[i] for i in mapping.values()], list(mapping.keys())
66 )
67 global_value = jax.make_array_from_single_device_arrays(
68 value.shape, layout, local_values
69 )
70 return global_value
71
72
73 def distribute_tensor(tensor, layout):
74 """Distribute the tensor based on the layout.
75
76 Note that this function can be used both in eager context, or within a
77 jitted function.
78
79 Args:
80 tensor: `jax.Array` that need to be distributed.
81 layout: `TensorLayout` for the distribution information, or a
82 `jax.sharding.Sharding` instance.
83
84 Returns:
85 Distributed value.
86 """
87 if not isinstance(layout, jax.sharding.Sharding):
88 layout = _to_jax_layout(layout)
89 # TODO(scottzhu): This might not be a cheap check, we should consider
90 # have some proper JAX API for doing this check.
91 if jax_utils.is_in_jax_tracing_scope():
92 return jax.lax.with_sharding_constraint(tensor, layout)
93
94 if layout.is_fully_addressable:
95 return jax.device_put(tensor, layout)
96 else:
97 # Need to only distribute the value to local addressible devices, and
98 # repack them back into global format.
99 mapping = layout.addressable_devices_indices_map(tensor.shape)
100 local_values = jax.device_put(
101 [tensor[i] for i in mapping.values()], list(mapping.keys())
102 )
103 global_value = jax.make_array_from_single_device_arrays(
104 tensor.shape, layout, local_values
105 )
106 return global_value
107
108
109 def distribute_data_input(inputs, layout):
110 """Distribute the input data with the corresponding layout.
111
112 Note that the inputs here is a local worker batch. Within the local worker,
113 the data need to be further partitioned to map to the each of the devices.
114
115 Args:
116 inputs: `jax.Array` that is already sharded to a local process size.
117 layout: `TensorLayout` for the distribution information, or a
118 `jax.sharding.Sharding` instance.
119
120 Returns:
121 Distributed inputs thats been properly put to local devices.
122 """
123 if not isinstance(layout, jax.sharding.Sharding):
124 layout = _to_jax_layout(layout)
125 if layout.is_fully_addressable:
126 return jax.device_put(inputs, layout)
127
128 # We need the jax mesh information to determine how to place the data
129 # on to each of the worker.
130 jax_mesh = layout.mesh
131 mesh_rank = len(jax_mesh.shape)
132 per_process_batch_size = inputs.shape[0]
133 if mesh_rank == 1:
134 # This is data parallel mesh only. We will split the full data
135 # across the batch dim.
136 num_split = jax.local_device_count()
137 per_replica_batch_size = per_process_batch_size // num_split
138 if per_process_batch_size % per_replica_batch_size != 0:
139 raise ValueError(
140 f"The local batch size {per_process_batch_size} is not"
141 "divisible by the number of local replicas "
142 f"{num_split}"
143 )
144 global_batch_size = per_process_batch_size * jax.process_count()
145 per_replica_batches = jax.numpy.split(inputs, num_split, axis=0)
146 elif mesh_rank == 2:
147 # Data+Model parallel
148 # In this case, we need to check if the mesh batch dim shape is large
149 # than number of local devices, so that we can decide whether a split
150 # is needed for the data, or a repeat/copy of the data is needed for
151 # each of the device.
152 # TODO(scottzhu): The mesh batch dim name is not available here, since
153 # we only have jax Mesh. We assume the first dim is for batch, and
154 # second dim is for model for now.
155 mesh_batch_dim_size = list(jax_mesh.shape.values())[0]
156 local_device_count = jax.local_device_count()
157 if mesh_batch_dim_size < local_device_count:
158 # No split needed, we only need to repeat here.
159 global_batch_size = per_process_batch_size
160 per_replica_batches = [inputs for _ in range(local_device_count)]
161 else:
162 # Note that global batch size is not simply per_process_batch_size *
163 # num_process. It actually depends on the model dim size.
164 global_batch_size = per_process_batch_size * (
165 mesh_batch_dim_size // local_device_count
166 )
167 per_replica_batches = jax.numpy.split(
168 inputs, local_device_count, axis=0
169 )
170 else:
171 raise ValueError(
172 "Only 1D or 2D mesh is supported at the moment. "
173 f"Received mesh shape = {jax_mesh.shape}"
174 )
175
176 global_shape = (global_batch_size,) + inputs.shape[1:]
177 global_batch_array = jax.make_array_from_single_device_arrays(
178 global_shape,
179 layout,
180 arrays=[
181 jax.device_put(batch, device)
182 for batch, device in zip(
183 per_replica_batches, layout.addressable_devices
184 )
185 ],
186 )
187 return global_batch_array
188
189
190 def initialize(job_addresses, num_processes, process_id):
191 if job_addresses and "," in job_addresses:
192 # When user provide all the job addresses, we will split and get the
193 # first one, which is the coordinator.
194 job_addresses = job_addresses.split(",")
195 # Do a sanity check to make sure the number of addresses also match
196 # the num_processes.
197 if num_processes is not None and num_processes != len(job_addresses):
198 raise ValueError(
199 f"The provided job_addresses {job_addresses} has "
200 f"{len(job_addresses)} jobs, but num_processes is "
201 f"{num_processes}"
202 )
203 corrdinator_address = job_addresses[0]
204 else:
205 corrdinator_address = job_addresses
206
207 jax.distributed.initialize(
208 corrdinator_address=corrdinator_address,
209 num_processes=num_processes,
210 process_id=process_id,
211 )
212
213
214 def num_processes():
215 """Return the number of processes for the current distribution setting."""
216 return jax.process_count()
217
218
219 def process_id():
220 """Return the current process ID for the distribution setting."""
221 return jax.process_index()
222
223
224 def _to_jax_device(device_id):
225 if isinstance(device_id, jax.Device):
226 return device_id
227 device_type, index = device_id.split(":")
228 index = int(index)
229 devices = jax.devices(backend=device_type)
230 if index >= len(devices):
231 raise ValueError(f"Unknown device: {device_id}")
232 return devices[index]
233
234
235 def _to_jax_mesh(device_mesh):
236 """Convert the DeviceMesh to JAX backend specific Mesh.
237
238 Args:
239 device_mesh: DeviceMesh instance to convert.
240
241 Returns:
242 A `jax.sharding.Mesh` instance.
243 """
244 shape = device_mesh.devices.shape
245 devices = [_to_jax_device(d) for d in device_mesh.devices.flatten()]
246 devices = np.array(devices).reshape(shape)
247 return jax.sharding.Mesh(devices, device_mesh.axis_names)
248
249
250 def _to_jax_layout(tensor_layout):
251 """Convert the TensorLayout to JAX backend specific Sharding.
252
253 Args:
254 tensor_layout: TensorLayout instance to convert.
255
256 Returns:
257 A `jax.sharding.NamedSharding` instance.
258 """
259 if tensor_layout.device_mesh is None:
260 raise ValueError(
261 "Cannot create sharding when device mesh is not set "
262 "for TensorLayout."
263 )
264 partition_spec = jax.sharding.PartitionSpec(*tensor_layout.axes)
265 jax_mesh = _to_jax_mesh(tensor_layout.device_mesh)
266 return jax.sharding.NamedSharding(jax_mesh, partition_spec)
267
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/keras/backend/jax/distribution_lib.py b/keras/backend/jax/distribution_lib.py
--- a/keras/backend/jax/distribution_lib.py
+++ b/keras/backend/jax/distribution_lib.py
@@ -200,12 +200,12 @@
f"{len(job_addresses)} jobs, but num_processes is "
f"{num_processes}"
)
- corrdinator_address = job_addresses[0]
+ coordinator_address = job_addresses[0]
else:
- corrdinator_address = job_addresses
+ coordinator_address = job_addresses
jax.distributed.initialize(
- corrdinator_address=corrdinator_address,
+ coordinator_address=coordinator_address,
num_processes=num_processes,
process_id=process_id,
)
|
{"golden_diff": "diff --git a/keras/backend/jax/distribution_lib.py b/keras/backend/jax/distribution_lib.py\n--- a/keras/backend/jax/distribution_lib.py\n+++ b/keras/backend/jax/distribution_lib.py\n@@ -200,12 +200,12 @@\n f\"{len(job_addresses)} jobs, but num_processes is \"\n f\"{num_processes}\"\n )\n- corrdinator_address = job_addresses[0]\n+ coordinator_address = job_addresses[0]\n else:\n- corrdinator_address = job_addresses\n+ coordinator_address = job_addresses\n \n jax.distributed.initialize(\n- corrdinator_address=corrdinator_address,\n+ coordinator_address=coordinator_address,\n num_processes=num_processes,\n process_id=process_id,\n )\n", "issue": "Typo in keras.distribution.initialize()\nHi,\r\n\r\nThere is a typo when calling `keras.distribution.initialize` due to a typo in the jax backend. The function pass the `corrdinator_address` argument instead of `coordinator_address` to `jax.distributed.initialize`\r\n\r\n```log\r\n---> 13 keras.distribution.initialize()\r\n\r\nFile /usr/local/lib/python3.10/site-packages/keras/src/distribution/distribution_lib.py:131, in initialize(job_addresses, num_processes, proceed_id)\r\n 129 if proceed_id is None and \"KERAS_DISTRIBUTION_PROCESS_ID\" in os.environ:\r\n 130 proceed_id = int(os.environ[\"KERAS_DISTRIBUTION_PROCESS_ID\"])\r\n--> 131 distribution_lib.initialize(job_addresses, num_processes, proceed_id)\r\n\r\nFile /usr/local/lib/python3.10/site-packages/keras/src/backend/jax/distribution_lib.py:207, in initialize(job_addresses, num_processes, process_id)\r\n 204 else:\r\n 205 corrdinator_address = job_addresses\r\n--> 207 jax.distributed.initialize(\r\n 208 corrdinator_address=corrdinator_address,\r\n 209 num_processes=num_processes,\r\n 210 process_id=process_id,\r\n 211 )\r\n\r\nTypeError: initialize() got an unexpected keyword argument 'corrdinator_address'\r\n```\r\n\r\n\n", "before_files": [{"content": "\"\"\"!!!DO NOT USE!!!\n\nDistribution related class for JAX backend.\n\nThis is just a prototype and we might want to unify it\nwith other backends in the future.\n\"\"\"\n\nimport jax\nimport numpy as np\n\nfrom keras.utils import jax_utils\n\n\ndef list_devices(device_type=None):\n \"\"\"Return all the available devices based on the device type.\n\n Note that this should return the global devices in a distributed setting.\n\n Args:\n device_type: string of `\"cpu\"`, `\"gpu\"` or `\"tpu\"`. Defaults to `\"gpu\"`\n or `\"tpu\"` if available when device_type is not provided. 
Otherwise\n will return the `\"cpu\"` devices.\n\n Return:\n List of devices that are available for distribute computation.\n \"\"\"\n device_type = device_type.lower() if device_type else None\n jax_devices = jax.devices(backend=device_type)\n return [f\"{device.platform}:{device.id}\" for device in jax_devices]\n\n\ndef distribute_variable(value, layout):\n \"\"\"Create a distributed variable for JAX.\n\n Since JAX doesn't have a variable class, this will just return a `jax.Array`\n with the corresponding layout/sharding specified.\n\n Note that this function should be used in eager context, not in jitted\n function.\n\n Args:\n value: the initial value of the variable.\n layout: `TensorLayout` for the created variable, or a\n `jax.sharding.Sharding` instance.\n\n Returns:\n jax.Array which is the distributed variable.\n \"\"\"\n if not isinstance(layout, jax.sharding.Sharding):\n layout = _to_jax_layout(layout)\n if isinstance(\n value, (jax.Array, jax.numpy.ndarray)\n ) and value.sharding.is_equivalent_to(layout, ndim=len(value.shape)):\n # Skip the relayout if the value is already having the proper sharding\n return value\n\n if layout.is_fully_addressable:\n return jax.device_put(value, layout)\n else:\n # Need to only distribute the value to local addressible devices, and\n # repack them back into global format.\n mapping = layout.addressable_devices_indices_map(value.shape)\n local_values = jax.device_put(\n [value[i] for i in mapping.values()], list(mapping.keys())\n )\n global_value = jax.make_array_from_single_device_arrays(\n value.shape, layout, local_values\n )\n return global_value\n\n\ndef distribute_tensor(tensor, layout):\n \"\"\"Distribute the tensor based on the layout.\n\n Note that this function can be used both in eager context, or within a\n jitted function.\n\n Args:\n tensor: `jax.Array` that need to be distributed.\n layout: `TensorLayout` for the distribution information, or a\n `jax.sharding.Sharding` instance.\n\n Returns:\n Distributed value.\n \"\"\"\n if not isinstance(layout, jax.sharding.Sharding):\n layout = _to_jax_layout(layout)\n # TODO(scottzhu): This might not be a cheap check, we should consider\n # have some proper JAX API for doing this check.\n if jax_utils.is_in_jax_tracing_scope():\n return jax.lax.with_sharding_constraint(tensor, layout)\n\n if layout.is_fully_addressable:\n return jax.device_put(tensor, layout)\n else:\n # Need to only distribute the value to local addressible devices, and\n # repack them back into global format.\n mapping = layout.addressable_devices_indices_map(tensor.shape)\n local_values = jax.device_put(\n [tensor[i] for i in mapping.values()], list(mapping.keys())\n )\n global_value = jax.make_array_from_single_device_arrays(\n tensor.shape, layout, local_values\n )\n return global_value\n\n\ndef distribute_data_input(inputs, layout):\n \"\"\"Distribute the input data with the corresponding layout.\n\n Note that the inputs here is a local worker batch. 
Within the local worker,\n the data need to be further partitioned to map to the each of the devices.\n\n Args:\n inputs: `jax.Array` that is already sharded to a local process size.\n layout: `TensorLayout` for the distribution information, or a\n `jax.sharding.Sharding` instance.\n\n Returns:\n Distributed inputs thats been properly put to local devices.\n \"\"\"\n if not isinstance(layout, jax.sharding.Sharding):\n layout = _to_jax_layout(layout)\n if layout.is_fully_addressable:\n return jax.device_put(inputs, layout)\n\n # We need the jax mesh information to determine how to place the data\n # on to each of the worker.\n jax_mesh = layout.mesh\n mesh_rank = len(jax_mesh.shape)\n per_process_batch_size = inputs.shape[0]\n if mesh_rank == 1:\n # This is data parallel mesh only. We will split the full data\n # across the batch dim.\n num_split = jax.local_device_count()\n per_replica_batch_size = per_process_batch_size // num_split\n if per_process_batch_size % per_replica_batch_size != 0:\n raise ValueError(\n f\"The local batch size {per_process_batch_size} is not\"\n \"divisible by the number of local replicas \"\n f\"{num_split}\"\n )\n global_batch_size = per_process_batch_size * jax.process_count()\n per_replica_batches = jax.numpy.split(inputs, num_split, axis=0)\n elif mesh_rank == 2:\n # Data+Model parallel\n # In this case, we need to check if the mesh batch dim shape is large\n # than number of local devices, so that we can decide whether a split\n # is needed for the data, or a repeat/copy of the data is needed for\n # each of the device.\n # TODO(scottzhu): The mesh batch dim name is not available here, since\n # we only have jax Mesh. We assume the first dim is for batch, and\n # second dim is for model for now.\n mesh_batch_dim_size = list(jax_mesh.shape.values())[0]\n local_device_count = jax.local_device_count()\n if mesh_batch_dim_size < local_device_count:\n # No split needed, we only need to repeat here.\n global_batch_size = per_process_batch_size\n per_replica_batches = [inputs for _ in range(local_device_count)]\n else:\n # Note that global batch size is not simply per_process_batch_size *\n # num_process. It actually depends on the model dim size.\n global_batch_size = per_process_batch_size * (\n mesh_batch_dim_size // local_device_count\n )\n per_replica_batches = jax.numpy.split(\n inputs, local_device_count, axis=0\n )\n else:\n raise ValueError(\n \"Only 1D or 2D mesh is supported at the moment. 
\"\n f\"Received mesh shape = {jax_mesh.shape}\"\n )\n\n global_shape = (global_batch_size,) + inputs.shape[1:]\n global_batch_array = jax.make_array_from_single_device_arrays(\n global_shape,\n layout,\n arrays=[\n jax.device_put(batch, device)\n for batch, device in zip(\n per_replica_batches, layout.addressable_devices\n )\n ],\n )\n return global_batch_array\n\n\ndef initialize(job_addresses, num_processes, process_id):\n if job_addresses and \",\" in job_addresses:\n # When user provide all the job addresses, we will split and get the\n # first one, which is the coordinator.\n job_addresses = job_addresses.split(\",\")\n # Do a sanity check to make sure the number of addresses also match\n # the num_processes.\n if num_processes is not None and num_processes != len(job_addresses):\n raise ValueError(\n f\"The provided job_addresses {job_addresses} has \"\n f\"{len(job_addresses)} jobs, but num_processes is \"\n f\"{num_processes}\"\n )\n corrdinator_address = job_addresses[0]\n else:\n corrdinator_address = job_addresses\n\n jax.distributed.initialize(\n corrdinator_address=corrdinator_address,\n num_processes=num_processes,\n process_id=process_id,\n )\n\n\ndef num_processes():\n \"\"\"Return the number of processes for the current distribution setting.\"\"\"\n return jax.process_count()\n\n\ndef process_id():\n \"\"\"Return the current process ID for the distribution setting.\"\"\"\n return jax.process_index()\n\n\ndef _to_jax_device(device_id):\n if isinstance(device_id, jax.Device):\n return device_id\n device_type, index = device_id.split(\":\")\n index = int(index)\n devices = jax.devices(backend=device_type)\n if index >= len(devices):\n raise ValueError(f\"Unknown device: {device_id}\")\n return devices[index]\n\n\ndef _to_jax_mesh(device_mesh):\n \"\"\"Convert the DeviceMesh to JAX backend specific Mesh.\n\n Args:\n device_mesh: DeviceMesh instance to convert.\n\n Returns:\n A `jax.sharding.Mesh` instance.\n \"\"\"\n shape = device_mesh.devices.shape\n devices = [_to_jax_device(d) for d in device_mesh.devices.flatten()]\n devices = np.array(devices).reshape(shape)\n return jax.sharding.Mesh(devices, device_mesh.axis_names)\n\n\ndef _to_jax_layout(tensor_layout):\n \"\"\"Convert the TensorLayout to JAX backend specific Sharding.\n\n Args:\n tensor_layout: TensorLayout instance to convert.\n\n Returns:\n A `jax.sharding.NamedSharding` instance.\n \"\"\"\n if tensor_layout.device_mesh is None:\n raise ValueError(\n \"Cannot create sharding when device mesh is not set \"\n \"for TensorLayout.\"\n )\n partition_spec = jax.sharding.PartitionSpec(*tensor_layout.axes)\n jax_mesh = _to_jax_mesh(tensor_layout.device_mesh)\n return jax.sharding.NamedSharding(jax_mesh, partition_spec)\n", "path": "keras/backend/jax/distribution_lib.py"}], "after_files": [{"content": "\"\"\"!!!DO NOT USE!!!\n\nDistribution related class for JAX backend.\n\nThis is just a prototype and we might want to unify it\nwith other backends in the future.\n\"\"\"\n\nimport jax\nimport numpy as np\n\nfrom keras.utils import jax_utils\n\n\ndef list_devices(device_type=None):\n \"\"\"Return all the available devices based on the device type.\n\n Note that this should return the global devices in a distributed setting.\n\n Args:\n device_type: string of `\"cpu\"`, `\"gpu\"` or `\"tpu\"`. Defaults to `\"gpu\"`\n or `\"tpu\"` if available when device_type is not provided. 
Otherwise\n will return the `\"cpu\"` devices.\n\n Return:\n List of devices that are available for distribute computation.\n \"\"\"\n device_type = device_type.lower() if device_type else None\n jax_devices = jax.devices(backend=device_type)\n return [f\"{device.platform}:{device.id}\" for device in jax_devices]\n\n\ndef distribute_variable(value, layout):\n \"\"\"Create a distributed variable for JAX.\n\n Since JAX doesn't have a variable class, this will just return a `jax.Array`\n with the corresponding layout/sharding specified.\n\n Note that this function should be used in eager context, not in jitted\n function.\n\n Args:\n value: the initial value of the variable.\n layout: `TensorLayout` for the created variable, or a\n `jax.sharding.Sharding` instance.\n\n Returns:\n jax.Array which is the distributed variable.\n \"\"\"\n if not isinstance(layout, jax.sharding.Sharding):\n layout = _to_jax_layout(layout)\n if isinstance(\n value, (jax.Array, jax.numpy.ndarray)\n ) and value.sharding.is_equivalent_to(layout, ndim=len(value.shape)):\n # Skip the relayout if the value is already having the proper sharding\n return value\n\n if layout.is_fully_addressable:\n return jax.device_put(value, layout)\n else:\n # Need to only distribute the value to local addressible devices, and\n # repack them back into global format.\n mapping = layout.addressable_devices_indices_map(value.shape)\n local_values = jax.device_put(\n [value[i] for i in mapping.values()], list(mapping.keys())\n )\n global_value = jax.make_array_from_single_device_arrays(\n value.shape, layout, local_values\n )\n return global_value\n\n\ndef distribute_tensor(tensor, layout):\n \"\"\"Distribute the tensor based on the layout.\n\n Note that this function can be used both in eager context, or within a\n jitted function.\n\n Args:\n tensor: `jax.Array` that need to be distributed.\n layout: `TensorLayout` for the distribution information, or a\n `jax.sharding.Sharding` instance.\n\n Returns:\n Distributed value.\n \"\"\"\n if not isinstance(layout, jax.sharding.Sharding):\n layout = _to_jax_layout(layout)\n # TODO(scottzhu): This might not be a cheap check, we should consider\n # have some proper JAX API for doing this check.\n if jax_utils.is_in_jax_tracing_scope():\n return jax.lax.with_sharding_constraint(tensor, layout)\n\n if layout.is_fully_addressable:\n return jax.device_put(tensor, layout)\n else:\n # Need to only distribute the value to local addressible devices, and\n # repack them back into global format.\n mapping = layout.addressable_devices_indices_map(tensor.shape)\n local_values = jax.device_put(\n [tensor[i] for i in mapping.values()], list(mapping.keys())\n )\n global_value = jax.make_array_from_single_device_arrays(\n tensor.shape, layout, local_values\n )\n return global_value\n\n\ndef distribute_data_input(inputs, layout):\n \"\"\"Distribute the input data with the corresponding layout.\n\n Note that the inputs here is a local worker batch. 
Within the local worker,\n the data need to be further partitioned to map to the each of the devices.\n\n Args:\n inputs: `jax.Array` that is already sharded to a local process size.\n layout: `TensorLayout` for the distribution information, or a\n `jax.sharding.Sharding` instance.\n\n Returns:\n Distributed inputs thats been properly put to local devices.\n \"\"\"\n if not isinstance(layout, jax.sharding.Sharding):\n layout = _to_jax_layout(layout)\n if layout.is_fully_addressable:\n return jax.device_put(inputs, layout)\n\n # We need the jax mesh information to determine how to place the data\n # on to each of the worker.\n jax_mesh = layout.mesh\n mesh_rank = len(jax_mesh.shape)\n per_process_batch_size = inputs.shape[0]\n if mesh_rank == 1:\n # This is data parallel mesh only. We will split the full data\n # across the batch dim.\n num_split = jax.local_device_count()\n per_replica_batch_size = per_process_batch_size // num_split\n if per_process_batch_size % per_replica_batch_size != 0:\n raise ValueError(\n f\"The local batch size {per_process_batch_size} is not\"\n \"divisible by the number of local replicas \"\n f\"{num_split}\"\n )\n global_batch_size = per_process_batch_size * jax.process_count()\n per_replica_batches = jax.numpy.split(inputs, num_split, axis=0)\n elif mesh_rank == 2:\n # Data+Model parallel\n # In this case, we need to check if the mesh batch dim shape is large\n # than number of local devices, so that we can decide whether a split\n # is needed for the data, or a repeat/copy of the data is needed for\n # each of the device.\n # TODO(scottzhu): The mesh batch dim name is not available here, since\n # we only have jax Mesh. We assume the first dim is for batch, and\n # second dim is for model for now.\n mesh_batch_dim_size = list(jax_mesh.shape.values())[0]\n local_device_count = jax.local_device_count()\n if mesh_batch_dim_size < local_device_count:\n # No split needed, we only need to repeat here.\n global_batch_size = per_process_batch_size\n per_replica_batches = [inputs for _ in range(local_device_count)]\n else:\n # Note that global batch size is not simply per_process_batch_size *\n # num_process. It actually depends on the model dim size.\n global_batch_size = per_process_batch_size * (\n mesh_batch_dim_size // local_device_count\n )\n per_replica_batches = jax.numpy.split(\n inputs, local_device_count, axis=0\n )\n else:\n raise ValueError(\n \"Only 1D or 2D mesh is supported at the moment. 
\"\n f\"Received mesh shape = {jax_mesh.shape}\"\n )\n\n global_shape = (global_batch_size,) + inputs.shape[1:]\n global_batch_array = jax.make_array_from_single_device_arrays(\n global_shape,\n layout,\n arrays=[\n jax.device_put(batch, device)\n for batch, device in zip(\n per_replica_batches, layout.addressable_devices\n )\n ],\n )\n return global_batch_array\n\n\ndef initialize(job_addresses, num_processes, process_id):\n if job_addresses and \",\" in job_addresses:\n # When user provide all the job addresses, we will split and get the\n # first one, which is the coordinator.\n job_addresses = job_addresses.split(\",\")\n # Do a sanity check to make sure the number of addresses also match\n # the num_processes.\n if num_processes is not None and num_processes != len(job_addresses):\n raise ValueError(\n f\"The provided job_addresses {job_addresses} has \"\n f\"{len(job_addresses)} jobs, but num_processes is \"\n f\"{num_processes}\"\n )\n coordinator_address = job_addresses[0]\n else:\n coordinator_address = job_addresses\n\n jax.distributed.initialize(\n coordinator_address=coordinator_address,\n num_processes=num_processes,\n process_id=process_id,\n )\n\n\ndef num_processes():\n \"\"\"Return the number of processes for the current distribution setting.\"\"\"\n return jax.process_count()\n\n\ndef process_id():\n \"\"\"Return the current process ID for the distribution setting.\"\"\"\n return jax.process_index()\n\n\ndef _to_jax_device(device_id):\n if isinstance(device_id, jax.Device):\n return device_id\n device_type, index = device_id.split(\":\")\n index = int(index)\n devices = jax.devices(backend=device_type)\n if index >= len(devices):\n raise ValueError(f\"Unknown device: {device_id}\")\n return devices[index]\n\n\ndef _to_jax_mesh(device_mesh):\n \"\"\"Convert the DeviceMesh to JAX backend specific Mesh.\n\n Args:\n device_mesh: DeviceMesh instance to convert.\n\n Returns:\n A `jax.sharding.Mesh` instance.\n \"\"\"\n shape = device_mesh.devices.shape\n devices = [_to_jax_device(d) for d in device_mesh.devices.flatten()]\n devices = np.array(devices).reshape(shape)\n return jax.sharding.Mesh(devices, device_mesh.axis_names)\n\n\ndef _to_jax_layout(tensor_layout):\n \"\"\"Convert the TensorLayout to JAX backend specific Sharding.\n\n Args:\n tensor_layout: TensorLayout instance to convert.\n\n Returns:\n A `jax.sharding.NamedSharding` instance.\n \"\"\"\n if tensor_layout.device_mesh is None:\n raise ValueError(\n \"Cannot create sharding when device mesh is not set \"\n \"for TensorLayout.\"\n )\n partition_spec = jax.sharding.PartitionSpec(*tensor_layout.axes)\n jax_mesh = _to_jax_mesh(tensor_layout.device_mesh)\n return jax.sharding.NamedSharding(jax_mesh, partition_spec)\n", "path": "keras/backend/jax/distribution_lib.py"}]}
| 3,416 | 176 |
gh_patches_debug_39697
|
rasdani/github-patches
|
git_diff
|
freedomofpress__securedrop-6590
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Run flake8 against alembic migrations
*This is a good first issue for new contributors to take on, if you have any questions, please ask on the task or in our [Gitter room](https://gitter.im/freedomofpress/securedrop)!*
## Description
In `.flake8` we currently exclude `securedrop/alembic/versions`. We should be running flake8 against those files by removing the exclusion and fixing any new flake8 errors.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/alembic/versions/c5a02eb52f2d_dropped_session_nonce_from_journalist_.py`
Content:
```
1 """dropped session_nonce from journalist table and revoked tokens table due to new session implementation
2
3 Revision ID: c5a02eb52f2d
4 Revises: b7f98cfd6a70
5 Create Date: 2022-04-16 21:25:22.398189
6
7 """
8 import sqlalchemy as sa
9 from alembic import op
10
11 # revision identifiers, used by Alembic.
12 revision = "c5a02eb52f2d"
13 down_revision = "b7f98cfd6a70"
14 branch_labels = None
15 depends_on = None
16
17
18 def upgrade() -> None:
19 # ### commands auto generated by Alembic - please adjust! ###
20 op.drop_table("revoked_tokens")
21 with op.batch_alter_table("journalists", schema=None) as batch_op:
22 batch_op.drop_column("session_nonce")
23
24 # ### end Alembic commands ###
25
26
27 def downgrade() -> None:
28 """This would have been the easy way, however previous does not have
29 default value and thus up/down assertion fails"""
30 # op.add_column('journalists', sa.Column('session_nonce', sa.Integer(), nullable=False, server_default='0'))
31
32 conn = op.get_bind()
33 conn.execute("PRAGMA legacy_alter_table=ON")
34 # Save existing journalist table.
35 op.rename_table("journalists", "journalists_tmp")
36
37 # Add nonce column.
38 op.add_column("journalists_tmp", sa.Column("session_nonce", sa.Integer()))
39
40 # Populate nonce column.
41 journalists = conn.execute(sa.text("SELECT * FROM journalists_tmp")).fetchall()
42
43 for journalist in journalists:
44 conn.execute(
45 sa.text(
46 """UPDATE journalists_tmp SET session_nonce=0 WHERE
47 id=:id"""
48 ).bindparams(id=journalist.id)
49 )
50
51 # Now create new table with null constraint applied.
52 op.create_table(
53 "journalists",
54 sa.Column("id", sa.Integer(), nullable=False),
55 sa.Column("uuid", sa.String(length=36), nullable=False),
56 sa.Column("username", sa.String(length=255), nullable=False),
57 sa.Column("first_name", sa.String(length=255), nullable=True),
58 sa.Column("last_name", sa.String(length=255), nullable=True),
59 sa.Column("pw_salt", sa.Binary(), nullable=True),
60 sa.Column("pw_hash", sa.Binary(), nullable=True),
61 sa.Column("passphrase_hash", sa.String(length=256), nullable=True),
62 sa.Column("is_admin", sa.Boolean(), nullable=True),
63 sa.Column("session_nonce", sa.Integer(), nullable=False),
64 sa.Column("otp_secret", sa.String(length=32), nullable=True),
65 sa.Column("is_totp", sa.Boolean(), nullable=True),
66 sa.Column("hotp_counter", sa.Integer(), nullable=True),
67 sa.Column("last_token", sa.String(length=6), nullable=True),
68 sa.Column("created_on", sa.DateTime(), nullable=True),
69 sa.Column("last_access", sa.DateTime(), nullable=True),
70 sa.PrimaryKeyConstraint("id"),
71 sa.UniqueConstraint("username"),
72 sa.UniqueConstraint("uuid"),
73 )
74
75 conn.execute(
76 """
77 INSERT INTO journalists
78 SELECT id, uuid, username, first_name, last_name, pw_salt, pw_hash,
79 passphrase_hash, is_admin, session_nonce, otp_secret, is_totp,
80 hotp_counter, last_token, created_on, last_access
81 FROM journalists_tmp
82 """
83 )
84
85 # Now delete the old table.
86 op.drop_table("journalists_tmp")
87
88 op.create_table(
89 "revoked_tokens",
90 sa.Column("id", sa.INTEGER(), nullable=False),
91 sa.Column("journalist_id", sa.INTEGER(), nullable=False),
92 sa.Column("token", sa.TEXT(), nullable=False),
93 sa.ForeignKeyConstraint(
94 ["journalist_id"],
95 ["journalists.id"],
96 ),
97 sa.PrimaryKeyConstraint("id"),
98 sa.UniqueConstraint("token"),
99 )
100
```
Path: `securedrop/alembic/versions/b7f98cfd6a70_make_filesystem_id_non_nullable.py`
Content:
```
1 """make_filesystem_id_non_nullable
2
3 Revision ID: b7f98cfd6a70
4 Revises: d9d36b6f4d1e
5 Create Date: 2022-03-18 18:10:27.842201
6
7 """
8 import sqlalchemy as sa
9 from alembic import op
10
11 # revision identifiers, used by Alembic.
12 revision = "b7f98cfd6a70"
13 down_revision = "d9d36b6f4d1e"
14 branch_labels = None
15 depends_on = None
16
17
18 def upgrade() -> None:
19 # Not having a filesystem_id makes the source useless, so if any of those do exist, we'll
20 # delete them first, as part of this migration.
21 # Because we can't rely on SQLAlchemy's cascade deletion, we have to do it manually.
22 # First we delete out of replies/seen_files/seen_messages (things that refer to things that refer
23 # to sources)
24 op.execute(
25 "DELETE FROM seen_replies WHERE reply_id IN ("
26 "SELECT replies.id FROM replies "
27 "JOIN sources ON sources.id=replies.source_id "
28 "WHERE filesystem_id IS NULL)"
29 )
30 op.execute(
31 "DELETE FROM seen_files WHERE file_id IN ("
32 "SELECT submissions.id FROM submissions "
33 "JOIN sources ON sources.id=submissions.source_id "
34 "WHERE filesystem_id IS NULL)"
35 )
36 op.execute(
37 "DELETE FROM seen_messages WHERE message_id IN ("
38 "SELECT submissions.id FROM submissions "
39 "JOIN sources ON sources.id=submissions.source_id "
40 "WHERE filesystem_id IS NULL)"
41 )
42 # Now things that directly refer to sources
43 for table in ("source_stars", "submissions", "replies"):
44 op.execute(
45 f"DELETE FROM {table} WHERE source_id IN " # nosec
46 f"(SELECT id FROM sources WHERE filesystem_id IS NULL)"
47 ) # nosec
48 # And now the sources
49 op.execute("DELETE FROM sources WHERE filesystem_id IS NULL")
50 with op.batch_alter_table("sources", schema=None) as batch_op:
51 batch_op.alter_column("filesystem_id", existing_type=sa.VARCHAR(length=96), nullable=False)
52
53
54 def downgrade() -> None:
55 with op.batch_alter_table("sources", schema=None) as batch_op:
56 batch_op.alter_column("filesystem_id", existing_type=sa.VARCHAR(length=96), nullable=True)
57
```
Path: `securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py`
Content:
```
1 """make journalist_id non-nullable
2
3 Revision ID: 2e24fc7536e8
4 Revises: de00920916bf
5 Create Date: 2022-01-12 19:31:06.186285
6
7 """
8 import os
9 import uuid
10
11 import pyotp
12 import sqlalchemy as sa
13 from alembic import op
14 from passlib.hash import argon2
15
16 # raise the errors if we're not in production
17 raise_errors = os.environ.get("SECUREDROP_ENV", "prod") != "prod"
18
19 try:
20 from models import ARGON2_PARAMS
21 from passphrases import PassphraseGenerator
22 except: # noqa
23 if raise_errors:
24 raise
25
26
27 # revision identifiers, used by Alembic.
28 revision = "2e24fc7536e8"
29 down_revision = "de00920916bf"
30 branch_labels = None
31 depends_on = None
32
33
34 def generate_passphrase_hash() -> str:
35 passphrase = PassphraseGenerator.get_default().generate_passphrase()
36 return argon2.using(**ARGON2_PARAMS).hash(passphrase)
37
38
39 def create_deleted() -> int:
40 """manually insert a "deleted" journalist user.
41
42 We need to do it this way since the model will reflect the current state of
43 the schema, not what it is at the current migration step
44
45 It should be basically identical to what Journalist.get_deleted() does
46 """
47 op.execute(
48 sa.text(
49 """\
50 INSERT INTO journalists (uuid, username, session_nonce, passphrase_hash, otp_secret)
51 VALUES (:uuid, "deleted", 0, :passphrase_hash, :otp_secret);
52 """
53 ).bindparams(
54 uuid=str(uuid.uuid4()),
55 passphrase_hash=generate_passphrase_hash(),
56 otp_secret=pyotp.random_base32(),
57 )
58 )
59 # Get the autoincrement ID back
60 conn = op.get_bind()
61 result = conn.execute('SELECT id FROM journalists WHERE username="deleted";').fetchall()
62 return result[0][0]
63
64
65 def migrate_nulls() -> None:
66 """migrate existing journalist_id=NULL over to deleted or delete them"""
67 op.execute("DELETE FROM journalist_login_attempt WHERE journalist_id IS NULL;")
68 op.execute("DELETE FROM revoked_tokens WHERE journalist_id IS NULL;")
69 # Look to see if we have data to migrate
70 tables = ("replies", "seen_files", "seen_messages", "seen_replies")
71 needs_migration = []
72 conn = op.get_bind()
73 for table in tables:
74 result = conn.execute( # nosec
75 f"SELECT 1 FROM {table} WHERE journalist_id IS NULL;"
76 ).first()
77 if result is not None:
78 needs_migration.append(table)
79
80 if not needs_migration:
81 return
82
83 deleted_id = create_deleted()
84 for table in needs_migration:
85 # The seen_ tables have UNIQUE(fk_id, journalist_id), so the deleted journalist can only have
86 # seen each item once. It is possible multiple NULL journalist have seen the same thing so we
87 # do this update in two passes.
88 # First we update as many rows to point to the deleted journalist as possible, ignoring any
89 # unique key violations.
90 op.execute(
91 sa.text(
92 f"UPDATE OR IGNORE {table} SET journalist_id=:journalist_id WHERE journalist_id IS NULL;"
93 ).bindparams(journalist_id=deleted_id)
94 )
95 # Then we delete any leftovers which had been ignored earlier.
96 op.execute(f"DELETE FROM {table} WHERE journalist_id IS NULL") # nosec
97
98
99 def upgrade() -> None:
100 migrate_nulls()
101
102 with op.batch_alter_table("journalist_login_attempt", schema=None) as batch_op:
103 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
104
105 with op.batch_alter_table("replies", schema=None) as batch_op:
106 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
107
108 with op.batch_alter_table("revoked_tokens", schema=None) as batch_op:
109 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
110
111 with op.batch_alter_table("seen_files", schema=None) as batch_op:
112 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
113
114 with op.batch_alter_table("seen_messages", schema=None) as batch_op:
115 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
116
117 with op.batch_alter_table("seen_replies", schema=None) as batch_op:
118 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
119
120
121 def downgrade() -> None:
122 # We do not un-migrate the data back to journalist_id=NULL
123
124 with op.batch_alter_table("seen_replies", schema=None) as batch_op:
125 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=True)
126
127 with op.batch_alter_table("seen_messages", schema=None) as batch_op:
128 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=True)
129
130 with op.batch_alter_table("seen_files", schema=None) as batch_op:
131 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=True)
132
133 with op.batch_alter_table("revoked_tokens", schema=None) as batch_op:
134 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=True)
135
136 with op.batch_alter_table("replies", schema=None) as batch_op:
137 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=True)
138
139 with op.batch_alter_table("journalist_login_attempt", schema=None) as batch_op:
140 batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=True)
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py b/securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py
--- a/securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py
+++ b/securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py
@@ -82,14 +82,15 @@
deleted_id = create_deleted()
for table in needs_migration:
- # The seen_ tables have UNIQUE(fk_id, journalist_id), so the deleted journalist can only have
- # seen each item once. It is possible multiple NULL journalist have seen the same thing so we
- # do this update in two passes.
+ # The seen_ tables have UNIQUE(fk_id, journalist_id), so the deleted journalist can only
+ # have seen each item once. It is possible multiple NULL journalist have seen the same thing
+ # so we do this update in two passes.
# First we update as many rows to point to the deleted journalist as possible, ignoring any
# unique key violations.
op.execute(
sa.text(
- f"UPDATE OR IGNORE {table} SET journalist_id=:journalist_id WHERE journalist_id IS NULL;"
+ f"UPDATE OR IGNORE {table} SET journalist_id=:journalist_id "
+ "WHERE journalist_id IS NULL;"
).bindparams(journalist_id=deleted_id)
)
# Then we delete any leftovers which had been ignored earlier.
diff --git a/securedrop/alembic/versions/b7f98cfd6a70_make_filesystem_id_non_nullable.py b/securedrop/alembic/versions/b7f98cfd6a70_make_filesystem_id_non_nullable.py
--- a/securedrop/alembic/versions/b7f98cfd6a70_make_filesystem_id_non_nullable.py
+++ b/securedrop/alembic/versions/b7f98cfd6a70_make_filesystem_id_non_nullable.py
@@ -19,8 +19,8 @@
# Not having a filesystem_id makes the source useless, so if any of those do exist, we'll
# delete them first, as part of this migration.
# Because we can't rely on SQLAlchemy's cascade deletion, we have to do it manually.
- # First we delete out of replies/seen_files/seen_messages (things that refer to things that refer
- # to sources)
+ # First we delete out of replies/seen_files/seen_messages (things that refer to things
+ # (source_stars/submissions/replies) that refer to sources)
op.execute(
"DELETE FROM seen_replies WHERE reply_id IN ("
"SELECT replies.id FROM replies "
diff --git a/securedrop/alembic/versions/c5a02eb52f2d_dropped_session_nonce_from_journalist_.py b/securedrop/alembic/versions/c5a02eb52f2d_dropped_session_nonce_from_journalist_.py
--- a/securedrop/alembic/versions/c5a02eb52f2d_dropped_session_nonce_from_journalist_.py
+++ b/securedrop/alembic/versions/c5a02eb52f2d_dropped_session_nonce_from_journalist_.py
@@ -1,4 +1,5 @@
-"""dropped session_nonce from journalist table and revoked tokens table due to new session implementation
+"""dropped session_nonce from journalist table and revoked tokens table
+ due to new session implementation
Revision ID: c5a02eb52f2d
Revises: b7f98cfd6a70
@@ -27,7 +28,8 @@
def downgrade() -> None:
"""This would have been the easy way, however previous does not have
default value and thus up/down assertion fails"""
- # op.add_column('journalists', sa.Column('session_nonce', sa.Integer(), nullable=False, server_default='0'))
+ # op.add_column('journalists', sa.Column('session_nonce', sa.Integer(),
+ # nullable=False, server_default='0'))
conn = op.get_bind()
conn.execute("PRAGMA legacy_alter_table=ON")
|
{"golden_diff": "diff --git a/securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py b/securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py\n--- a/securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py\n+++ b/securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py\n@@ -82,14 +82,15 @@\n \n deleted_id = create_deleted()\n for table in needs_migration:\n- # The seen_ tables have UNIQUE(fk_id, journalist_id), so the deleted journalist can only have\n- # seen each item once. It is possible multiple NULL journalist have seen the same thing so we\n- # do this update in two passes.\n+ # The seen_ tables have UNIQUE(fk_id, journalist_id), so the deleted journalist can only\n+ # have seen each item once. It is possible multiple NULL journalist have seen the same thing\n+ # so we do this update in two passes.\n # First we update as many rows to point to the deleted journalist as possible, ignoring any\n # unique key violations.\n op.execute(\n sa.text(\n- f\"UPDATE OR IGNORE {table} SET journalist_id=:journalist_id WHERE journalist_id IS NULL;\"\n+ f\"UPDATE OR IGNORE {table} SET journalist_id=:journalist_id \"\n+ \"WHERE journalist_id IS NULL;\"\n ).bindparams(journalist_id=deleted_id)\n )\n # Then we delete any leftovers which had been ignored earlier.\ndiff --git a/securedrop/alembic/versions/b7f98cfd6a70_make_filesystem_id_non_nullable.py b/securedrop/alembic/versions/b7f98cfd6a70_make_filesystem_id_non_nullable.py\n--- a/securedrop/alembic/versions/b7f98cfd6a70_make_filesystem_id_non_nullable.py\n+++ b/securedrop/alembic/versions/b7f98cfd6a70_make_filesystem_id_non_nullable.py\n@@ -19,8 +19,8 @@\n # Not having a filesystem_id makes the source useless, so if any of those do exist, we'll\n # delete them first, as part of this migration.\n # Because we can't rely on SQLAlchemy's cascade deletion, we have to do it manually.\n- # First we delete out of replies/seen_files/seen_messages (things that refer to things that refer\n- # to sources)\n+ # First we delete out of replies/seen_files/seen_messages (things that refer to things\n+ # (source_stars/submissions/replies) that refer to sources)\n op.execute(\n \"DELETE FROM seen_replies WHERE reply_id IN (\"\n \"SELECT replies.id FROM replies \"\ndiff --git a/securedrop/alembic/versions/c5a02eb52f2d_dropped_session_nonce_from_journalist_.py b/securedrop/alembic/versions/c5a02eb52f2d_dropped_session_nonce_from_journalist_.py\n--- a/securedrop/alembic/versions/c5a02eb52f2d_dropped_session_nonce_from_journalist_.py\n+++ b/securedrop/alembic/versions/c5a02eb52f2d_dropped_session_nonce_from_journalist_.py\n@@ -1,4 +1,5 @@\n-\"\"\"dropped session_nonce from journalist table and revoked tokens table due to new session implementation\n+\"\"\"dropped session_nonce from journalist table and revoked tokens table\n+ due to new session implementation\n \n Revision ID: c5a02eb52f2d\n Revises: b7f98cfd6a70\n@@ -27,7 +28,8 @@\n def downgrade() -> None:\n \"\"\"This would have been the easy way, however previous does not have\n default value and thus up/down assertion fails\"\"\"\n- # op.add_column('journalists', sa.Column('session_nonce', sa.Integer(), nullable=False, server_default='0'))\n+ # op.add_column('journalists', sa.Column('session_nonce', sa.Integer(),\n+ # nullable=False, server_default='0'))\n \n conn = op.get_bind()\n conn.execute(\"PRAGMA legacy_alter_table=ON\")\n", "issue": "Run flake8 against alembic migrations\n*This is a good first issue for new contributors to take 
on, if you have any questions, please ask on the task or in our [Gitter room](https://gitter.im/freedomofpress/securedrop)!*\r\n\r\n## Description\r\n\r\nIn `.flake8` we currently exclude `securedrop/alembic/versions`. We should be running flake8 against those files by removing the exclusion and fixing any new flake8 errors.\n", "before_files": [{"content": "\"\"\"dropped session_nonce from journalist table and revoked tokens table due to new session implementation\n\nRevision ID: c5a02eb52f2d\nRevises: b7f98cfd6a70\nCreate Date: 2022-04-16 21:25:22.398189\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"c5a02eb52f2d\"\ndown_revision = \"b7f98cfd6a70\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table(\"revoked_tokens\")\n with op.batch_alter_table(\"journalists\", schema=None) as batch_op:\n batch_op.drop_column(\"session_nonce\")\n\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n \"\"\"This would have been the easy way, however previous does not have\n default value and thus up/down assertion fails\"\"\"\n # op.add_column('journalists', sa.Column('session_nonce', sa.Integer(), nullable=False, server_default='0'))\n\n conn = op.get_bind()\n conn.execute(\"PRAGMA legacy_alter_table=ON\")\n # Save existing journalist table.\n op.rename_table(\"journalists\", \"journalists_tmp\")\n\n # Add nonce column.\n op.add_column(\"journalists_tmp\", sa.Column(\"session_nonce\", sa.Integer()))\n\n # Populate nonce column.\n journalists = conn.execute(sa.text(\"SELECT * FROM journalists_tmp\")).fetchall()\n\n for journalist in journalists:\n conn.execute(\n sa.text(\n \"\"\"UPDATE journalists_tmp SET session_nonce=0 WHERE\n id=:id\"\"\"\n ).bindparams(id=journalist.id)\n )\n\n # Now create new table with null constraint applied.\n op.create_table(\n \"journalists\",\n sa.Column(\"id\", sa.Integer(), nullable=False),\n sa.Column(\"uuid\", sa.String(length=36), nullable=False),\n sa.Column(\"username\", sa.String(length=255), nullable=False),\n sa.Column(\"first_name\", sa.String(length=255), nullable=True),\n sa.Column(\"last_name\", sa.String(length=255), nullable=True),\n sa.Column(\"pw_salt\", sa.Binary(), nullable=True),\n sa.Column(\"pw_hash\", sa.Binary(), nullable=True),\n sa.Column(\"passphrase_hash\", sa.String(length=256), nullable=True),\n sa.Column(\"is_admin\", sa.Boolean(), nullable=True),\n sa.Column(\"session_nonce\", sa.Integer(), nullable=False),\n sa.Column(\"otp_secret\", sa.String(length=32), nullable=True),\n sa.Column(\"is_totp\", sa.Boolean(), nullable=True),\n sa.Column(\"hotp_counter\", sa.Integer(), nullable=True),\n sa.Column(\"last_token\", sa.String(length=6), nullable=True),\n sa.Column(\"created_on\", sa.DateTime(), nullable=True),\n sa.Column(\"last_access\", sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint(\"id\"),\n sa.UniqueConstraint(\"username\"),\n sa.UniqueConstraint(\"uuid\"),\n )\n\n conn.execute(\n \"\"\"\n INSERT INTO journalists\n SELECT id, uuid, username, first_name, last_name, pw_salt, pw_hash,\n passphrase_hash, is_admin, session_nonce, otp_secret, is_totp,\n hotp_counter, last_token, created_on, last_access\n FROM journalists_tmp\n \"\"\"\n )\n\n # Now delete the old table.\n op.drop_table(\"journalists_tmp\")\n\n op.create_table(\n \"revoked_tokens\",\n sa.Column(\"id\", sa.INTEGER(), nullable=False),\n sa.Column(\"journalist_id\", sa.INTEGER(), nullable=False),\n 
sa.Column(\"token\", sa.TEXT(), nullable=False),\n sa.ForeignKeyConstraint(\n [\"journalist_id\"],\n [\"journalists.id\"],\n ),\n sa.PrimaryKeyConstraint(\"id\"),\n sa.UniqueConstraint(\"token\"),\n )\n", "path": "securedrop/alembic/versions/c5a02eb52f2d_dropped_session_nonce_from_journalist_.py"}, {"content": "\"\"\"make_filesystem_id_non_nullable\n\nRevision ID: b7f98cfd6a70\nRevises: d9d36b6f4d1e\nCreate Date: 2022-03-18 18:10:27.842201\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"b7f98cfd6a70\"\ndown_revision = \"d9d36b6f4d1e\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # Not having a filesystem_id makes the source useless, so if any of those do exist, we'll\n # delete them first, as part of this migration.\n # Because we can't rely on SQLAlchemy's cascade deletion, we have to do it manually.\n # First we delete out of replies/seen_files/seen_messages (things that refer to things that refer\n # to sources)\n op.execute(\n \"DELETE FROM seen_replies WHERE reply_id IN (\"\n \"SELECT replies.id FROM replies \"\n \"JOIN sources ON sources.id=replies.source_id \"\n \"WHERE filesystem_id IS NULL)\"\n )\n op.execute(\n \"DELETE FROM seen_files WHERE file_id IN (\"\n \"SELECT submissions.id FROM submissions \"\n \"JOIN sources ON sources.id=submissions.source_id \"\n \"WHERE filesystem_id IS NULL)\"\n )\n op.execute(\n \"DELETE FROM seen_messages WHERE message_id IN (\"\n \"SELECT submissions.id FROM submissions \"\n \"JOIN sources ON sources.id=submissions.source_id \"\n \"WHERE filesystem_id IS NULL)\"\n )\n # Now things that directly refer to sources\n for table in (\"source_stars\", \"submissions\", \"replies\"):\n op.execute(\n f\"DELETE FROM {table} WHERE source_id IN \" # nosec\n f\"(SELECT id FROM sources WHERE filesystem_id IS NULL)\"\n ) # nosec\n # And now the sources\n op.execute(\"DELETE FROM sources WHERE filesystem_id IS NULL\")\n with op.batch_alter_table(\"sources\", schema=None) as batch_op:\n batch_op.alter_column(\"filesystem_id\", existing_type=sa.VARCHAR(length=96), nullable=False)\n\n\ndef downgrade() -> None:\n with op.batch_alter_table(\"sources\", schema=None) as batch_op:\n batch_op.alter_column(\"filesystem_id\", existing_type=sa.VARCHAR(length=96), nullable=True)\n", "path": "securedrop/alembic/versions/b7f98cfd6a70_make_filesystem_id_non_nullable.py"}, {"content": "\"\"\"make journalist_id non-nullable\n\nRevision ID: 2e24fc7536e8\nRevises: de00920916bf\nCreate Date: 2022-01-12 19:31:06.186285\n\n\"\"\"\nimport os\nimport uuid\n\nimport pyotp\nimport sqlalchemy as sa\nfrom alembic import op\nfrom passlib.hash import argon2\n\n# raise the errors if we're not in production\nraise_errors = os.environ.get(\"SECUREDROP_ENV\", \"prod\") != \"prod\"\n\ntry:\n from models import ARGON2_PARAMS\n from passphrases import PassphraseGenerator\nexcept: # noqa\n if raise_errors:\n raise\n\n\n# revision identifiers, used by Alembic.\nrevision = \"2e24fc7536e8\"\ndown_revision = \"de00920916bf\"\nbranch_labels = None\ndepends_on = None\n\n\ndef generate_passphrase_hash() -> str:\n passphrase = PassphraseGenerator.get_default().generate_passphrase()\n return argon2.using(**ARGON2_PARAMS).hash(passphrase)\n\n\ndef create_deleted() -> int:\n \"\"\"manually insert a \"deleted\" journalist user.\n\n We need to do it this way since the model will reflect the current state of\n the schema, not what it is at the current migration step\n\n It should be basically identical to what 
Journalist.get_deleted() does\n \"\"\"\n op.execute(\n sa.text(\n \"\"\"\\\n INSERT INTO journalists (uuid, username, session_nonce, passphrase_hash, otp_secret)\n VALUES (:uuid, \"deleted\", 0, :passphrase_hash, :otp_secret);\n \"\"\"\n ).bindparams(\n uuid=str(uuid.uuid4()),\n passphrase_hash=generate_passphrase_hash(),\n otp_secret=pyotp.random_base32(),\n )\n )\n # Get the autoincrement ID back\n conn = op.get_bind()\n result = conn.execute('SELECT id FROM journalists WHERE username=\"deleted\";').fetchall()\n return result[0][0]\n\n\ndef migrate_nulls() -> None:\n \"\"\"migrate existing journalist_id=NULL over to deleted or delete them\"\"\"\n op.execute(\"DELETE FROM journalist_login_attempt WHERE journalist_id IS NULL;\")\n op.execute(\"DELETE FROM revoked_tokens WHERE journalist_id IS NULL;\")\n # Look to see if we have data to migrate\n tables = (\"replies\", \"seen_files\", \"seen_messages\", \"seen_replies\")\n needs_migration = []\n conn = op.get_bind()\n for table in tables:\n result = conn.execute( # nosec\n f\"SELECT 1 FROM {table} WHERE journalist_id IS NULL;\"\n ).first()\n if result is not None:\n needs_migration.append(table)\n\n if not needs_migration:\n return\n\n deleted_id = create_deleted()\n for table in needs_migration:\n # The seen_ tables have UNIQUE(fk_id, journalist_id), so the deleted journalist can only have\n # seen each item once. It is possible multiple NULL journalist have seen the same thing so we\n # do this update in two passes.\n # First we update as many rows to point to the deleted journalist as possible, ignoring any\n # unique key violations.\n op.execute(\n sa.text(\n f\"UPDATE OR IGNORE {table} SET journalist_id=:journalist_id WHERE journalist_id IS NULL;\"\n ).bindparams(journalist_id=deleted_id)\n )\n # Then we delete any leftovers which had been ignored earlier.\n op.execute(f\"DELETE FROM {table} WHERE journalist_id IS NULL\") # nosec\n\n\ndef upgrade() -> None:\n migrate_nulls()\n\n with op.batch_alter_table(\"journalist_login_attempt\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n with op.batch_alter_table(\"replies\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n with op.batch_alter_table(\"revoked_tokens\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n with op.batch_alter_table(\"seen_files\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n with op.batch_alter_table(\"seen_messages\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n with op.batch_alter_table(\"seen_replies\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n\ndef downgrade() -> None:\n # We do not un-migrate the data back to journalist_id=NULL\n\n with op.batch_alter_table(\"seen_replies\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n\n with op.batch_alter_table(\"seen_messages\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n\n with op.batch_alter_table(\"seen_files\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n\n with 
op.batch_alter_table(\"revoked_tokens\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n\n with op.batch_alter_table(\"replies\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n\n with op.batch_alter_table(\"journalist_login_attempt\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n", "path": "securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py"}], "after_files": [{"content": "\"\"\"dropped session_nonce from journalist table and revoked tokens table\n due to new session implementation\n\nRevision ID: c5a02eb52f2d\nRevises: b7f98cfd6a70\nCreate Date: 2022-04-16 21:25:22.398189\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"c5a02eb52f2d\"\ndown_revision = \"b7f98cfd6a70\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table(\"revoked_tokens\")\n with op.batch_alter_table(\"journalists\", schema=None) as batch_op:\n batch_op.drop_column(\"session_nonce\")\n\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n \"\"\"This would have been the easy way, however previous does not have\n default value and thus up/down assertion fails\"\"\"\n # op.add_column('journalists', sa.Column('session_nonce', sa.Integer(),\n # nullable=False, server_default='0'))\n\n conn = op.get_bind()\n conn.execute(\"PRAGMA legacy_alter_table=ON\")\n # Save existing journalist table.\n op.rename_table(\"journalists\", \"journalists_tmp\")\n\n # Add nonce column.\n op.add_column(\"journalists_tmp\", sa.Column(\"session_nonce\", sa.Integer()))\n\n # Populate nonce column.\n journalists = conn.execute(sa.text(\"SELECT * FROM journalists_tmp\")).fetchall()\n\n for journalist in journalists:\n conn.execute(\n sa.text(\n \"\"\"UPDATE journalists_tmp SET session_nonce=0 WHERE\n id=:id\"\"\"\n ).bindparams(id=journalist.id)\n )\n\n # Now create new table with null constraint applied.\n op.create_table(\n \"journalists\",\n sa.Column(\"id\", sa.Integer(), nullable=False),\n sa.Column(\"uuid\", sa.String(length=36), nullable=False),\n sa.Column(\"username\", sa.String(length=255), nullable=False),\n sa.Column(\"first_name\", sa.String(length=255), nullable=True),\n sa.Column(\"last_name\", sa.String(length=255), nullable=True),\n sa.Column(\"pw_salt\", sa.Binary(), nullable=True),\n sa.Column(\"pw_hash\", sa.Binary(), nullable=True),\n sa.Column(\"passphrase_hash\", sa.String(length=256), nullable=True),\n sa.Column(\"is_admin\", sa.Boolean(), nullable=True),\n sa.Column(\"session_nonce\", sa.Integer(), nullable=False),\n sa.Column(\"otp_secret\", sa.String(length=32), nullable=True),\n sa.Column(\"is_totp\", sa.Boolean(), nullable=True),\n sa.Column(\"hotp_counter\", sa.Integer(), nullable=True),\n sa.Column(\"last_token\", sa.String(length=6), nullable=True),\n sa.Column(\"created_on\", sa.DateTime(), nullable=True),\n sa.Column(\"last_access\", sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint(\"id\"),\n sa.UniqueConstraint(\"username\"),\n sa.UniqueConstraint(\"uuid\"),\n )\n\n conn.execute(\n \"\"\"\n INSERT INTO journalists\n SELECT id, uuid, username, first_name, last_name, pw_salt, pw_hash,\n passphrase_hash, is_admin, session_nonce, otp_secret, is_totp,\n hotp_counter, last_token, created_on, last_access\n FROM 
journalists_tmp\n \"\"\"\n )\n\n # Now delete the old table.\n op.drop_table(\"journalists_tmp\")\n\n op.create_table(\n \"revoked_tokens\",\n sa.Column(\"id\", sa.INTEGER(), nullable=False),\n sa.Column(\"journalist_id\", sa.INTEGER(), nullable=False),\n sa.Column(\"token\", sa.TEXT(), nullable=False),\n sa.ForeignKeyConstraint(\n [\"journalist_id\"],\n [\"journalists.id\"],\n ),\n sa.PrimaryKeyConstraint(\"id\"),\n sa.UniqueConstraint(\"token\"),\n )\n", "path": "securedrop/alembic/versions/c5a02eb52f2d_dropped_session_nonce_from_journalist_.py"}, {"content": "\"\"\"make_filesystem_id_non_nullable\n\nRevision ID: b7f98cfd6a70\nRevises: d9d36b6f4d1e\nCreate Date: 2022-03-18 18:10:27.842201\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"b7f98cfd6a70\"\ndown_revision = \"d9d36b6f4d1e\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # Not having a filesystem_id makes the source useless, so if any of those do exist, we'll\n # delete them first, as part of this migration.\n # Because we can't rely on SQLAlchemy's cascade deletion, we have to do it manually.\n # First we delete out of replies/seen_files/seen_messages (things that refer to things\n # (source_stars/submissions/replies) that refer to sources)\n op.execute(\n \"DELETE FROM seen_replies WHERE reply_id IN (\"\n \"SELECT replies.id FROM replies \"\n \"JOIN sources ON sources.id=replies.source_id \"\n \"WHERE filesystem_id IS NULL)\"\n )\n op.execute(\n \"DELETE FROM seen_files WHERE file_id IN (\"\n \"SELECT submissions.id FROM submissions \"\n \"JOIN sources ON sources.id=submissions.source_id \"\n \"WHERE filesystem_id IS NULL)\"\n )\n op.execute(\n \"DELETE FROM seen_messages WHERE message_id IN (\"\n \"SELECT submissions.id FROM submissions \"\n \"JOIN sources ON sources.id=submissions.source_id \"\n \"WHERE filesystem_id IS NULL)\"\n )\n # Now things that directly refer to sources\n for table in (\"source_stars\", \"submissions\", \"replies\"):\n op.execute(\n f\"DELETE FROM {table} WHERE source_id IN \" # nosec\n f\"(SELECT id FROM sources WHERE filesystem_id IS NULL)\"\n ) # nosec\n # And now the sources\n op.execute(\"DELETE FROM sources WHERE filesystem_id IS NULL\")\n with op.batch_alter_table(\"sources\", schema=None) as batch_op:\n batch_op.alter_column(\"filesystem_id\", existing_type=sa.VARCHAR(length=96), nullable=False)\n\n\ndef downgrade() -> None:\n with op.batch_alter_table(\"sources\", schema=None) as batch_op:\n batch_op.alter_column(\"filesystem_id\", existing_type=sa.VARCHAR(length=96), nullable=True)\n", "path": "securedrop/alembic/versions/b7f98cfd6a70_make_filesystem_id_non_nullable.py"}, {"content": "\"\"\"make journalist_id non-nullable\n\nRevision ID: 2e24fc7536e8\nRevises: de00920916bf\nCreate Date: 2022-01-12 19:31:06.186285\n\n\"\"\"\nimport os\nimport uuid\n\nimport pyotp\nimport sqlalchemy as sa\nfrom alembic import op\nfrom passlib.hash import argon2\n\n# raise the errors if we're not in production\nraise_errors = os.environ.get(\"SECUREDROP_ENV\", \"prod\") != \"prod\"\n\ntry:\n from models import ARGON2_PARAMS\n from passphrases import PassphraseGenerator\nexcept: # noqa\n if raise_errors:\n raise\n\n\n# revision identifiers, used by Alembic.\nrevision = \"2e24fc7536e8\"\ndown_revision = \"de00920916bf\"\nbranch_labels = None\ndepends_on = None\n\n\ndef generate_passphrase_hash() -> str:\n passphrase = PassphraseGenerator.get_default().generate_passphrase()\n return 
argon2.using(**ARGON2_PARAMS).hash(passphrase)\n\n\ndef create_deleted() -> int:\n \"\"\"manually insert a \"deleted\" journalist user.\n\n We need to do it this way since the model will reflect the current state of\n the schema, not what it is at the current migration step\n\n It should be basically identical to what Journalist.get_deleted() does\n \"\"\"\n op.execute(\n sa.text(\n \"\"\"\\\n INSERT INTO journalists (uuid, username, session_nonce, passphrase_hash, otp_secret)\n VALUES (:uuid, \"deleted\", 0, :passphrase_hash, :otp_secret);\n \"\"\"\n ).bindparams(\n uuid=str(uuid.uuid4()),\n passphrase_hash=generate_passphrase_hash(),\n otp_secret=pyotp.random_base32(),\n )\n )\n # Get the autoincrement ID back\n conn = op.get_bind()\n result = conn.execute('SELECT id FROM journalists WHERE username=\"deleted\";').fetchall()\n return result[0][0]\n\n\ndef migrate_nulls() -> None:\n \"\"\"migrate existing journalist_id=NULL over to deleted or delete them\"\"\"\n op.execute(\"DELETE FROM journalist_login_attempt WHERE journalist_id IS NULL;\")\n op.execute(\"DELETE FROM revoked_tokens WHERE journalist_id IS NULL;\")\n # Look to see if we have data to migrate\n tables = (\"replies\", \"seen_files\", \"seen_messages\", \"seen_replies\")\n needs_migration = []\n conn = op.get_bind()\n for table in tables:\n result = conn.execute( # nosec\n f\"SELECT 1 FROM {table} WHERE journalist_id IS NULL;\"\n ).first()\n if result is not None:\n needs_migration.append(table)\n\n if not needs_migration:\n return\n\n deleted_id = create_deleted()\n for table in needs_migration:\n # The seen_ tables have UNIQUE(fk_id, journalist_id), so the deleted journalist can only\n # have seen each item once. It is possible multiple NULL journalist have seen the same thing\n # so we do this update in two passes.\n # First we update as many rows to point to the deleted journalist as possible, ignoring any\n # unique key violations.\n op.execute(\n sa.text(\n f\"UPDATE OR IGNORE {table} SET journalist_id=:journalist_id \"\n \"WHERE journalist_id IS NULL;\"\n ).bindparams(journalist_id=deleted_id)\n )\n # Then we delete any leftovers which had been ignored earlier.\n op.execute(f\"DELETE FROM {table} WHERE journalist_id IS NULL\") # nosec\n\n\ndef upgrade() -> None:\n migrate_nulls()\n\n with op.batch_alter_table(\"journalist_login_attempt\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n with op.batch_alter_table(\"replies\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n with op.batch_alter_table(\"revoked_tokens\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n with op.batch_alter_table(\"seen_files\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n with op.batch_alter_table(\"seen_messages\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n with op.batch_alter_table(\"seen_replies\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=False)\n\n\ndef downgrade() -> None:\n # We do not un-migrate the data back to journalist_id=NULL\n\n with op.batch_alter_table(\"seen_replies\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n\n with 
op.batch_alter_table(\"seen_messages\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n\n with op.batch_alter_table(\"seen_files\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n\n with op.batch_alter_table(\"revoked_tokens\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n\n with op.batch_alter_table(\"replies\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n\n with op.batch_alter_table(\"journalist_login_attempt\", schema=None) as batch_op:\n batch_op.alter_column(\"journalist_id\", existing_type=sa.INTEGER(), nullable=True)\n", "path": "securedrop/alembic/versions/2e24fc7536e8_make_journalist_id_non_nullable.py"}]}
| 3,799 | 990 |
gh_patches_debug_56983
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-172
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove unused import
As per comment https://github.com/open-telemetry/opentelemetry-python-contrib/pull/107#discussion_r516262746, there appears to be an unused import in the jinja2 instrumentation
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16
17 Usage
18 -----
19
20 The OpenTelemetry ``jinja2`` integration traces templates loading, compilation
21 and rendering.
22
23 Usage
24 -----
25
26 .. code-block:: python
27
28 from jinja2 import Environment, FileSystemLoader
29 from opentelemetry.instrumentation.jinja2 import Jinja2Instrumentor
30 from opentelemetry import trace
31 from opentelemetry.trace import TracerProvider
32
33 trace.set_tracer_provider(TracerProvider())
34
35 Jinja2Instrumentor().instrument()
36
37 env = Environment(loader=FileSystemLoader("templates"))
38 template = env.get_template("mytemplate.html")
39
40 API
41 ---
42 """
43 # pylint: disable=no-value-for-parameter
44
45 import logging
46
47 import jinja2
48 from wrapt import ObjectProxy
49 from wrapt import wrap_function_wrapper as _wrap
50
51 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
52 from opentelemetry.instrumentation.jinja2.version import __version__
53 from opentelemetry.instrumentation.utils import unwrap
54 from opentelemetry.trace import SpanKind, get_tracer
55 from opentelemetry.trace.status import Status, StatusCode
56
57 logger = logging.getLogger(__name__)
58
59 ATTRIBUTE_JINJA2_TEMPLATE_NAME = "jinja2.template_name"
60 ATTRIBUTE_JINJA2_TEMPLATE_PATH = "jinja2.template_path"
61 DEFAULT_TEMPLATE_NAME = "<memory>"
62
63
64 def _with_tracer_wrapper(func):
65 """Helper for providing tracer for wrapper functions.
66 """
67
68 def _with_tracer(tracer):
69 def wrapper(wrapped, instance, args, kwargs):
70 return func(tracer, wrapped, instance, args, kwargs)
71
72 return wrapper
73
74 return _with_tracer
75
76
77 @_with_tracer_wrapper
78 def _wrap_render(tracer, wrapped, instance, args, kwargs):
79 """Wrap `Template.render()` or `Template.generate()`
80 """
81 with tracer.start_as_current_span(
82 "jinja2.render", kind=SpanKind.INTERNAL,
83 ) as span:
84 if span.is_recording():
85 template_name = instance.name or DEFAULT_TEMPLATE_NAME
86 span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)
87 return wrapped(*args, **kwargs)
88
89
90 @_with_tracer_wrapper
91 def _wrap_compile(tracer, wrapped, _, args, kwargs):
92 with tracer.start_as_current_span(
93 "jinja2.compile", kind=SpanKind.INTERNAL,
94 ) as span:
95 if span.is_recording():
96 template_name = (
97 args[1]
98 if len(args) > 1
99 else kwargs.get("name", DEFAULT_TEMPLATE_NAME)
100 )
101 span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)
102 return wrapped(*args, **kwargs)
103
104
105 @_with_tracer_wrapper
106 def _wrap_load_template(tracer, wrapped, _, args, kwargs):
107 with tracer.start_as_current_span(
108 "jinja2.load", kind=SpanKind.INTERNAL,
109 ) as span:
110 if span.is_recording():
111 template_name = kwargs.get("name", args[0])
112 span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)
113 template = None
114 try:
115 template = wrapped(*args, **kwargs)
116 return template
117 finally:
118 if template and span.is_recording():
119 span.set_attribute(
120 ATTRIBUTE_JINJA2_TEMPLATE_PATH, template.filename
121 )
122
123
124 class Jinja2Instrumentor(BaseInstrumentor):
125 """An instrumentor for jinja2
126
127 See `BaseInstrumentor`
128 """
129
130 def _instrument(self, **kwargs):
131 tracer_provider = kwargs.get("tracer_provider")
132 tracer = get_tracer(__name__, __version__, tracer_provider)
133
134 _wrap(jinja2, "environment.Template.render", _wrap_render(tracer))
135 _wrap(jinja2, "environment.Template.generate", _wrap_render(tracer))
136 _wrap(jinja2, "environment.Environment.compile", _wrap_compile(tracer))
137 _wrap(
138 jinja2,
139 "environment.Environment._load_template",
140 _wrap_load_template(tracer),
141 )
142
143 def _uninstrument(self, **kwargs):
144 unwrap(jinja2.Template, "render")
145 unwrap(jinja2.Template, "generate")
146 unwrap(jinja2.Environment, "compile")
147 unwrap(jinja2.Environment, "_load_template")
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py b/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py
@@ -52,7 +52,6 @@
from opentelemetry.instrumentation.jinja2.version import __version__
from opentelemetry.instrumentation.utils import unwrap
from opentelemetry.trace import SpanKind, get_tracer
-from opentelemetry.trace.status import Status, StatusCode
logger = logging.getLogger(__name__)
|
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py b/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py\n@@ -52,7 +52,6 @@\n from opentelemetry.instrumentation.jinja2.version import __version__\n from opentelemetry.instrumentation.utils import unwrap\n from opentelemetry.trace import SpanKind, get_tracer\n-from opentelemetry.trace.status import Status, StatusCode\n \n logger = logging.getLogger(__name__)\n", "issue": "Remove unused import\nAs per comment https://github.com/open-telemetry/opentelemetry-python-contrib/pull/107#discussion_r516262746, there appears to be an unused import in the jinja2 instrumentation\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n\nUsage\n-----\n\nThe OpenTelemetry ``jinja2`` integration traces templates loading, compilation\nand rendering.\n\nUsage\n-----\n\n.. code-block:: python\n\n from jinja2 import Environment, FileSystemLoader\n from opentelemetry.instrumentation.jinja2 import Jinja2Instrumentor\n from opentelemetry import trace\n from opentelemetry.trace import TracerProvider\n\n trace.set_tracer_provider(TracerProvider())\n\n Jinja2Instrumentor().instrument()\n\n env = Environment(loader=FileSystemLoader(\"templates\"))\n template = env.get_template(\"mytemplate.html\")\n\nAPI\n---\n\"\"\"\n# pylint: disable=no-value-for-parameter\n\nimport logging\n\nimport jinja2\nfrom wrapt import ObjectProxy\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.jinja2.version import __version__\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.trace import SpanKind, get_tracer\nfrom opentelemetry.trace.status import Status, StatusCode\n\nlogger = logging.getLogger(__name__)\n\nATTRIBUTE_JINJA2_TEMPLATE_NAME = \"jinja2.template_name\"\nATTRIBUTE_JINJA2_TEMPLATE_PATH = \"jinja2.template_path\"\nDEFAULT_TEMPLATE_NAME = \"<memory>\"\n\n\ndef _with_tracer_wrapper(func):\n \"\"\"Helper for providing tracer for wrapper functions.\n \"\"\"\n\n def _with_tracer(tracer):\n def wrapper(wrapped, instance, args, kwargs):\n return func(tracer, wrapped, instance, args, kwargs)\n\n return wrapper\n\n return _with_tracer\n\n\n@_with_tracer_wrapper\ndef _wrap_render(tracer, wrapped, instance, args, kwargs):\n \"\"\"Wrap `Template.render()` or `Template.generate()`\n \"\"\"\n with tracer.start_as_current_span(\n \"jinja2.render\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = instance.name or DEFAULT_TEMPLATE_NAME\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, 
template_name)\n return wrapped(*args, **kwargs)\n\n\n@_with_tracer_wrapper\ndef _wrap_compile(tracer, wrapped, _, args, kwargs):\n with tracer.start_as_current_span(\n \"jinja2.compile\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = (\n args[1]\n if len(args) > 1\n else kwargs.get(\"name\", DEFAULT_TEMPLATE_NAME)\n )\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n return wrapped(*args, **kwargs)\n\n\n@_with_tracer_wrapper\ndef _wrap_load_template(tracer, wrapped, _, args, kwargs):\n with tracer.start_as_current_span(\n \"jinja2.load\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = kwargs.get(\"name\", args[0])\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n template = None\n try:\n template = wrapped(*args, **kwargs)\n return template\n finally:\n if template and span.is_recording():\n span.set_attribute(\n ATTRIBUTE_JINJA2_TEMPLATE_PATH, template.filename\n )\n\n\nclass Jinja2Instrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for jinja2\n\n See `BaseInstrumentor`\n \"\"\"\n\n def _instrument(self, **kwargs):\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = get_tracer(__name__, __version__, tracer_provider)\n\n _wrap(jinja2, \"environment.Template.render\", _wrap_render(tracer))\n _wrap(jinja2, \"environment.Template.generate\", _wrap_render(tracer))\n _wrap(jinja2, \"environment.Environment.compile\", _wrap_compile(tracer))\n _wrap(\n jinja2,\n \"environment.Environment._load_template\",\n _wrap_load_template(tracer),\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(jinja2.Template, \"render\")\n unwrap(jinja2.Template, \"generate\")\n unwrap(jinja2.Environment, \"compile\")\n unwrap(jinja2.Environment, \"_load_template\")\n", "path": "instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n\nUsage\n-----\n\nThe OpenTelemetry ``jinja2`` integration traces templates loading, compilation\nand rendering.\n\nUsage\n-----\n\n.. 
code-block:: python\n\n from jinja2 import Environment, FileSystemLoader\n from opentelemetry.instrumentation.jinja2 import Jinja2Instrumentor\n from opentelemetry import trace\n from opentelemetry.trace import TracerProvider\n\n trace.set_tracer_provider(TracerProvider())\n\n Jinja2Instrumentor().instrument()\n\n env = Environment(loader=FileSystemLoader(\"templates\"))\n template = env.get_template(\"mytemplate.html\")\n\nAPI\n---\n\"\"\"\n# pylint: disable=no-value-for-parameter\n\nimport logging\n\nimport jinja2\nfrom wrapt import ObjectProxy\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.jinja2.version import __version__\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.trace import SpanKind, get_tracer\n\nlogger = logging.getLogger(__name__)\n\nATTRIBUTE_JINJA2_TEMPLATE_NAME = \"jinja2.template_name\"\nATTRIBUTE_JINJA2_TEMPLATE_PATH = \"jinja2.template_path\"\nDEFAULT_TEMPLATE_NAME = \"<memory>\"\n\n\ndef _with_tracer_wrapper(func):\n \"\"\"Helper for providing tracer for wrapper functions.\n \"\"\"\n\n def _with_tracer(tracer):\n def wrapper(wrapped, instance, args, kwargs):\n return func(tracer, wrapped, instance, args, kwargs)\n\n return wrapper\n\n return _with_tracer\n\n\n@_with_tracer_wrapper\ndef _wrap_render(tracer, wrapped, instance, args, kwargs):\n \"\"\"Wrap `Template.render()` or `Template.generate()`\n \"\"\"\n with tracer.start_as_current_span(\n \"jinja2.render\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = instance.name or DEFAULT_TEMPLATE_NAME\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n return wrapped(*args, **kwargs)\n\n\n@_with_tracer_wrapper\ndef _wrap_compile(tracer, wrapped, _, args, kwargs):\n with tracer.start_as_current_span(\n \"jinja2.compile\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = (\n args[1]\n if len(args) > 1\n else kwargs.get(\"name\", DEFAULT_TEMPLATE_NAME)\n )\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n return wrapped(*args, **kwargs)\n\n\n@_with_tracer_wrapper\ndef _wrap_load_template(tracer, wrapped, _, args, kwargs):\n with tracer.start_as_current_span(\n \"jinja2.load\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = kwargs.get(\"name\", args[0])\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n template = None\n try:\n template = wrapped(*args, **kwargs)\n return template\n finally:\n if template and span.is_recording():\n span.set_attribute(\n ATTRIBUTE_JINJA2_TEMPLATE_PATH, template.filename\n )\n\n\nclass Jinja2Instrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for jinja2\n\n See `BaseInstrumentor`\n \"\"\"\n\n def _instrument(self, **kwargs):\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = get_tracer(__name__, __version__, tracer_provider)\n\n _wrap(jinja2, \"environment.Template.render\", _wrap_render(tracer))\n _wrap(jinja2, \"environment.Template.generate\", _wrap_render(tracer))\n _wrap(jinja2, \"environment.Environment.compile\", _wrap_compile(tracer))\n _wrap(\n jinja2,\n \"environment.Environment._load_template\",\n _wrap_load_template(tracer),\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(jinja2.Template, \"render\")\n unwrap(jinja2.Template, \"generate\")\n unwrap(jinja2.Environment, \"compile\")\n unwrap(jinja2.Environment, \"_load_template\")\n", "path": 
"instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py"}]}
| 1,704 | 183 |
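Aside: the entry above turns on spotting an unused import. Below is a minimal, illustrative sketch of flagging such imports with the standard-library `ast` module; the command-line path handling is a placeholder, and the heuristic knowingly ignores corner cases such as `__all__` re-exports or names referenced only inside strings, so it is a rough aid rather than a real linter.

```python
import ast
import sys


def unused_imports(source: str):
    """Return (name, lineno) pairs for imports never loaded in the module body."""
    tree = ast.parse(source)
    imported = {}  # bound name -> line of the import statement
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                imported[alias.asname or alias.name.split(".")[0]] = node.lineno
        elif isinstance(node, ast.ImportFrom):
            for alias in node.names:
                if alias.name != "*":
                    imported[alias.asname or alias.name] = node.lineno
    # An attribute chain such as jinja2.environment.Template still loads the bare
    # Name "jinja2", so collecting Name nodes in Load context is enough here.
    used = {
        node.id
        for node in ast.walk(tree)
        if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Load)
    }
    return sorted((name, line) for name, line in imported.items() if name not in used)


if __name__ == "__main__":
    path = sys.argv[1] if len(sys.argv) > 1 else "example.py"
    with open(path) as handle:
        for name, line in unused_imports(handle.read()):
            print(f"{path}:{line}: '{name}' imported but unused")
```

Run against the instrumentation module above, this would report the `Status` and `StatusCode` names that the golden diff removes.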
gh_patches_debug_29928
|
rasdani/github-patches
|
git_diff
|
inventree__InvenTree-1749
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add "issued_by" and "resposible_for" fields to Build Order table
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `InvenTree/build/serializers.py`
Content:
```
1 """
2 JSON serializers for Build API
3 """
4
5 # -*- coding: utf-8 -*-
6 from __future__ import unicode_literals
7
8 from django.db.models import Case, When, Value
9 from django.db.models import BooleanField
10
11 from rest_framework import serializers
12
13 from InvenTree.serializers import InvenTreeModelSerializer, InvenTreeAttachmentSerializerField
14
15 from stock.serializers import StockItemSerializerBrief
16 from stock.serializers import LocationSerializer
17 from part.serializers import PartSerializer, PartBriefSerializer
18
19 from .models import Build, BuildItem, BuildOrderAttachment
20
21
22 class BuildSerializer(InvenTreeModelSerializer):
23 """ Serializes a Build object """
24
25 url = serializers.CharField(source='get_absolute_url', read_only=True)
26 status_text = serializers.CharField(source='get_status_display', read_only=True)
27
28 part_detail = PartBriefSerializer(source='part', many=False, read_only=True)
29
30 quantity = serializers.FloatField()
31
32 overdue = serializers.BooleanField(required=False, read_only=True)
33
34 @staticmethod
35 def annotate_queryset(queryset):
36 """
37 Add custom annotations to the BuildSerializer queryset,
38 performing database queries as efficiently as possible.
39
40 The following annoted fields are added:
41
42 - overdue: True if the build is outstanding *and* the completion date has past
43
44 """
45
46 # Annotate a boolean 'overdue' flag
47
48 queryset = queryset.annotate(
49 overdue=Case(
50 When(
51 Build.OVERDUE_FILTER, then=Value(True, output_field=BooleanField()),
52 ),
53 default=Value(False, output_field=BooleanField())
54 )
55 )
56
57 return queryset
58
59 def __init__(self, *args, **kwargs):
60 part_detail = kwargs.pop('part_detail', False)
61
62 super().__init__(*args, **kwargs)
63
64 if part_detail is not True:
65 self.fields.pop('part_detail')
66
67 class Meta:
68 model = Build
69 fields = [
70 'pk',
71 'url',
72 'title',
73 'creation_date',
74 'completed',
75 'completion_date',
76 'part',
77 'part_detail',
78 'overdue',
79 'reference',
80 'sales_order',
81 'quantity',
82 'status',
83 'status_text',
84 'target_date',
85 'notes',
86 'link',
87 ]
88
89 read_only_fields = [
90 'completed',
91 'creation_date',
92 'completion_data',
93 'status',
94 'status_text',
95 ]
96
97
98 class BuildItemSerializer(InvenTreeModelSerializer):
99 """ Serializes a BuildItem object """
100
101 bom_part = serializers.IntegerField(source='bom_item.sub_part.pk', read_only=True)
102 part = serializers.IntegerField(source='stock_item.part.pk', read_only=True)
103 location = serializers.IntegerField(source='stock_item.location.pk', read_only=True)
104
105 # Extra (optional) detail fields
106 part_detail = PartSerializer(source='stock_item.part', many=False, read_only=True)
107 build_detail = BuildSerializer(source='build', many=False, read_only=True)
108 stock_item_detail = StockItemSerializerBrief(source='stock_item', read_only=True)
109 location_detail = LocationSerializer(source='stock_item.location', read_only=True)
110
111 quantity = serializers.FloatField()
112
113 def __init__(self, *args, **kwargs):
114
115 build_detail = kwargs.pop('build_detail', False)
116 part_detail = kwargs.pop('part_detail', False)
117 location_detail = kwargs.pop('location_detail', False)
118
119 super().__init__(*args, **kwargs)
120
121 if not build_detail:
122 self.fields.pop('build_detail')
123
124 if not part_detail:
125 self.fields.pop('part_detail')
126
127 if not location_detail:
128 self.fields.pop('location_detail')
129
130 class Meta:
131 model = BuildItem
132 fields = [
133 'pk',
134 'bom_part',
135 'build',
136 'build_detail',
137 'install_into',
138 'location',
139 'location_detail',
140 'part',
141 'part_detail',
142 'stock_item',
143 'stock_item_detail',
144 'quantity'
145 ]
146
147
148 class BuildAttachmentSerializer(InvenTreeModelSerializer):
149 """
150 Serializer for a BuildAttachment
151 """
152
153 attachment = InvenTreeAttachmentSerializerField(required=True)
154
155 class Meta:
156 model = BuildOrderAttachment
157
158 fields = [
159 'pk',
160 'build',
161 'attachment',
162 'comment',
163 'upload_date',
164 ]
165
166 read_only_fields = [
167 'upload_date',
168 ]
169
```
Path: `InvenTree/build/api.py`
Content:
```
1 """
2 JSON API for the Build app
3 """
4
5 # -*- coding: utf-8 -*-
6 from __future__ import unicode_literals
7
8 from django_filters.rest_framework import DjangoFilterBackend
9 from rest_framework import filters
10 from rest_framework import generics
11
12 from django.conf.urls import url, include
13
14 from InvenTree.api import AttachmentMixin
15 from InvenTree.helpers import str2bool, isNull
16 from InvenTree.status_codes import BuildStatus
17
18 from .models import Build, BuildItem, BuildOrderAttachment
19 from .serializers import BuildAttachmentSerializer, BuildSerializer, BuildItemSerializer
20
21
22 class BuildList(generics.ListCreateAPIView):
23 """ API endpoint for accessing a list of Build objects.
24
25 - GET: Return list of objects (with filters)
26 - POST: Create a new Build object
27 """
28
29 queryset = Build.objects.all()
30 serializer_class = BuildSerializer
31
32 filter_backends = [
33 DjangoFilterBackend,
34 filters.SearchFilter,
35 filters.OrderingFilter,
36 ]
37
38 filter_fields = [
39 'sales_order',
40 ]
41
42 ordering_fields = [
43 'reference',
44 'part__name',
45 'status',
46 'creation_date',
47 'target_date',
48 'completion_date',
49 'quantity',
50 ]
51
52 search_fields = [
53 'reference',
54 'part__name',
55 'title',
56 ]
57
58 def get_queryset(self):
59 """
60 Override the queryset filtering,
61 as some of the fields don't natively play nicely with DRF
62 """
63
64 queryset = super().get_queryset().prefetch_related('part')
65
66 queryset = BuildSerializer.annotate_queryset(queryset)
67
68 return queryset
69
70 def filter_queryset(self, queryset):
71
72 queryset = super().filter_queryset(queryset)
73
74 params = self.request.query_params
75
76 # Filter by "parent"
77 parent = params.get('parent', None)
78
79 if parent is not None:
80 queryset = queryset.filter(parent=parent)
81
82 # Filter by "ancestor" builds
83 ancestor = params.get('ancestor', None)
84
85 if ancestor is not None:
86 try:
87 ancestor = Build.objects.get(pk=ancestor)
88
89 descendants = ancestor.get_descendants(include_self=True)
90
91 queryset = queryset.filter(
92 parent__pk__in=[b.pk for b in descendants]
93 )
94
95 except (ValueError, Build.DoesNotExist):
96 pass
97
98 # Filter by build status?
99 status = params.get('status', None)
100
101 if status is not None:
102 queryset = queryset.filter(status=status)
103
104 # Filter by "pending" status
105 active = params.get('active', None)
106
107 if active is not None:
108 active = str2bool(active)
109
110 if active:
111 queryset = queryset.filter(status__in=BuildStatus.ACTIVE_CODES)
112 else:
113 queryset = queryset.exclude(status__in=BuildStatus.ACTIVE_CODES)
114
115 # Filter by "overdue" status?
116 overdue = params.get('overdue', None)
117
118 if overdue is not None:
119 overdue = str2bool(overdue)
120
121 if overdue:
122 queryset = queryset.filter(Build.OVERDUE_FILTER)
123 else:
124 queryset = queryset.exclude(Build.OVERDUE_FILTER)
125
126 # Filter by associated part?
127 part = params.get('part', None)
128
129 if part is not None:
130 queryset = queryset.filter(part=part)
131
132 # Filter by 'date range'
133 min_date = params.get('min_date', None)
134 max_date = params.get('max_date', None)
135
136 if min_date is not None and max_date is not None:
137 queryset = Build.filterByDate(queryset, min_date, max_date)
138
139 return queryset
140
141 def get_serializer(self, *args, **kwargs):
142
143 try:
144 part_detail = str2bool(self.request.GET.get('part_detail', None))
145 except AttributeError:
146 part_detail = None
147
148 kwargs['part_detail'] = part_detail
149
150 return self.serializer_class(*args, **kwargs)
151
152
153 class BuildDetail(generics.RetrieveUpdateAPIView):
154 """ API endpoint for detail view of a Build object """
155
156 queryset = Build.objects.all()
157 serializer_class = BuildSerializer
158
159
160 class BuildItemList(generics.ListCreateAPIView):
161 """ API endpoint for accessing a list of BuildItem objects
162
163 - GET: Return list of objects
164 - POST: Create a new BuildItem object
165 """
166
167 serializer_class = BuildItemSerializer
168
169 def get_serializer(self, *args, **kwargs):
170
171 try:
172 params = self.request.query_params
173
174 kwargs['part_detail'] = str2bool(params.get('part_detail', False))
175 kwargs['build_detail'] = str2bool(params.get('build_detail', False))
176 kwargs['location_detail'] = str2bool(params.get('location_detail', False))
177 except AttributeError:
178 pass
179
180 return self.serializer_class(*args, **kwargs)
181
182 def get_queryset(self):
183 """ Override the queryset method,
184 to allow filtering by stock_item.part
185 """
186
187 query = BuildItem.objects.all()
188
189 query = query.select_related('stock_item')
190 query = query.prefetch_related('stock_item__part')
191 query = query.prefetch_related('stock_item__part__category')
192
193 return query
194
195 def filter_queryset(self, queryset):
196
197 queryset = super().filter_queryset(queryset)
198
199 params = self.request.query_params
200
201 # Does the user wish to filter by part?
202 part_pk = params.get('part', None)
203
204 if part_pk:
205 queryset = queryset.filter(stock_item__part=part_pk)
206
207 # Filter by output target
208 output = params.get('output', None)
209
210 if output:
211
212 if isNull(output):
213 queryset = queryset.filter(install_into=None)
214 else:
215 queryset = queryset.filter(install_into=output)
216
217 return queryset
218
219 filter_backends = [
220 DjangoFilterBackend,
221 ]
222
223 filter_fields = [
224 'build',
225 'stock_item',
226 'install_into',
227 ]
228
229
230 class BuildAttachmentList(generics.ListCreateAPIView, AttachmentMixin):
231 """
232 API endpoint for listing (and creating) BuildOrderAttachment objects
233 """
234
235 queryset = BuildOrderAttachment.objects.all()
236 serializer_class = BuildAttachmentSerializer
237
238
239 class BuildAttachmentDetail(generics.RetrieveUpdateDestroyAPIView, AttachmentMixin):
240 """
241 Detail endpoint for a BuildOrderAttachment object
242 """
243
244 queryset = BuildOrderAttachment.objects.all()
245 serializer_class = BuildAttachmentSerializer
246
247
248 build_api_urls = [
249
250 # Attachments
251 url(r'^attachment/', include([
252 url(r'^(?P<pk>\d+)/', BuildAttachmentDetail.as_view(), name='api-build-attachment-detail'),
253 url('^.*$', BuildAttachmentList.as_view(), name='api-build-attachment-list'),
254 ])),
255
256 # Build Items
257 url(r'^item/', include([
258 url('^.*$', BuildItemList.as_view(), name='api-build-item-list')
259 ])),
260
261 # Build Detail
262 url(r'^(?P<pk>\d+)/', BuildDetail.as_view(), name='api-build-detail'),
263
264 # Build List
265 url(r'^.*$', BuildList.as_view(), name='api-build-list'),
266 ]
267
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/InvenTree/build/api.py b/InvenTree/build/api.py
--- a/InvenTree/build/api.py
+++ b/InvenTree/build/api.py
@@ -47,6 +47,8 @@
'target_date',
'completion_date',
'quantity',
+ 'issued_by',
+ 'responsible',
]
search_fields = [
diff --git a/InvenTree/build/serializers.py b/InvenTree/build/serializers.py
--- a/InvenTree/build/serializers.py
+++ b/InvenTree/build/serializers.py
@@ -10,11 +10,12 @@
from rest_framework import serializers
-from InvenTree.serializers import InvenTreeModelSerializer, InvenTreeAttachmentSerializerField
+from InvenTree.serializers import InvenTreeModelSerializer, InvenTreeAttachmentSerializerField, UserSerializerBrief
from stock.serializers import StockItemSerializerBrief
from stock.serializers import LocationSerializer
from part.serializers import PartSerializer, PartBriefSerializer
+from users.serializers import OwnerSerializer
from .models import Build, BuildItem, BuildOrderAttachment
@@ -31,6 +32,10 @@
overdue = serializers.BooleanField(required=False, read_only=True)
+ issued_by_detail = UserSerializerBrief(source='issued_by', read_only=True)
+
+ responsible_detail = OwnerSerializer(source='responsible', read_only=True)
+
@staticmethod
def annotate_queryset(queryset):
"""
@@ -84,6 +89,10 @@
'target_date',
'notes',
'link',
+ 'issued_by',
+ 'issued_by_detail',
+ 'responsible',
+ 'responsible_detail',
]
read_only_fields = [
|
{"golden_diff": "diff --git a/InvenTree/build/api.py b/InvenTree/build/api.py\n--- a/InvenTree/build/api.py\n+++ b/InvenTree/build/api.py\n@@ -47,6 +47,8 @@\n 'target_date',\n 'completion_date',\n 'quantity',\n+ 'issued_by',\n+ 'responsible',\n ]\n \n search_fields = [\ndiff --git a/InvenTree/build/serializers.py b/InvenTree/build/serializers.py\n--- a/InvenTree/build/serializers.py\n+++ b/InvenTree/build/serializers.py\n@@ -10,11 +10,12 @@\n \n from rest_framework import serializers\n \n-from InvenTree.serializers import InvenTreeModelSerializer, InvenTreeAttachmentSerializerField\n+from InvenTree.serializers import InvenTreeModelSerializer, InvenTreeAttachmentSerializerField, UserSerializerBrief\n \n from stock.serializers import StockItemSerializerBrief\n from stock.serializers import LocationSerializer\n from part.serializers import PartSerializer, PartBriefSerializer\n+from users.serializers import OwnerSerializer\n \n from .models import Build, BuildItem, BuildOrderAttachment\n \n@@ -31,6 +32,10 @@\n \n overdue = serializers.BooleanField(required=False, read_only=True)\n \n+ issued_by_detail = UserSerializerBrief(source='issued_by', read_only=True)\n+\n+ responsible_detail = OwnerSerializer(source='responsible', read_only=True)\n+\n @staticmethod\n def annotate_queryset(queryset):\n \"\"\"\n@@ -84,6 +89,10 @@\n 'target_date',\n 'notes',\n 'link',\n+ 'issued_by',\n+ 'issued_by_detail',\n+ 'responsible',\n+ 'responsible_detail',\n ]\n \n read_only_fields = [\n", "issue": "Add \"issued_by\" and \"resposible_for\" fields to Build Order table\n\n", "before_files": [{"content": "\"\"\"\nJSON serializers for Build API\n\"\"\"\n\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db.models import Case, When, Value\nfrom django.db.models import BooleanField\n\nfrom rest_framework import serializers\n\nfrom InvenTree.serializers import InvenTreeModelSerializer, InvenTreeAttachmentSerializerField\n\nfrom stock.serializers import StockItemSerializerBrief\nfrom stock.serializers import LocationSerializer\nfrom part.serializers import PartSerializer, PartBriefSerializer\n\nfrom .models import Build, BuildItem, BuildOrderAttachment\n\n\nclass BuildSerializer(InvenTreeModelSerializer):\n \"\"\" Serializes a Build object \"\"\"\n\n url = serializers.CharField(source='get_absolute_url', read_only=True)\n status_text = serializers.CharField(source='get_status_display', read_only=True)\n\n part_detail = PartBriefSerializer(source='part', many=False, read_only=True)\n\n quantity = serializers.FloatField()\n\n overdue = serializers.BooleanField(required=False, read_only=True)\n\n @staticmethod\n def annotate_queryset(queryset):\n \"\"\"\n Add custom annotations to the BuildSerializer queryset,\n performing database queries as efficiently as possible.\n\n The following annoted fields are added:\n\n - overdue: True if the build is outstanding *and* the completion date has past\n\n \"\"\"\n\n # Annotate a boolean 'overdue' flag\n\n queryset = queryset.annotate(\n overdue=Case(\n When(\n Build.OVERDUE_FILTER, then=Value(True, output_field=BooleanField()),\n ),\n default=Value(False, output_field=BooleanField())\n )\n )\n\n return queryset\n\n def __init__(self, *args, **kwargs):\n part_detail = kwargs.pop('part_detail', False)\n\n super().__init__(*args, **kwargs)\n\n if part_detail is not True:\n self.fields.pop('part_detail')\n\n class Meta:\n model = Build\n fields = [\n 'pk',\n 'url',\n 'title',\n 'creation_date',\n 'completed',\n 'completion_date',\n 'part',\n 
'part_detail',\n 'overdue',\n 'reference',\n 'sales_order',\n 'quantity',\n 'status',\n 'status_text',\n 'target_date',\n 'notes',\n 'link',\n ]\n\n read_only_fields = [\n 'completed',\n 'creation_date',\n 'completion_data',\n 'status',\n 'status_text',\n ]\n\n\nclass BuildItemSerializer(InvenTreeModelSerializer):\n \"\"\" Serializes a BuildItem object \"\"\"\n\n bom_part = serializers.IntegerField(source='bom_item.sub_part.pk', read_only=True)\n part = serializers.IntegerField(source='stock_item.part.pk', read_only=True)\n location = serializers.IntegerField(source='stock_item.location.pk', read_only=True)\n\n # Extra (optional) detail fields\n part_detail = PartSerializer(source='stock_item.part', many=False, read_only=True)\n build_detail = BuildSerializer(source='build', many=False, read_only=True)\n stock_item_detail = StockItemSerializerBrief(source='stock_item', read_only=True)\n location_detail = LocationSerializer(source='stock_item.location', read_only=True)\n\n quantity = serializers.FloatField()\n\n def __init__(self, *args, **kwargs):\n\n build_detail = kwargs.pop('build_detail', False)\n part_detail = kwargs.pop('part_detail', False)\n location_detail = kwargs.pop('location_detail', False)\n\n super().__init__(*args, **kwargs)\n\n if not build_detail:\n self.fields.pop('build_detail')\n\n if not part_detail:\n self.fields.pop('part_detail')\n\n if not location_detail:\n self.fields.pop('location_detail')\n\n class Meta:\n model = BuildItem\n fields = [\n 'pk',\n 'bom_part',\n 'build',\n 'build_detail',\n 'install_into',\n 'location',\n 'location_detail',\n 'part',\n 'part_detail',\n 'stock_item',\n 'stock_item_detail',\n 'quantity'\n ]\n\n\nclass BuildAttachmentSerializer(InvenTreeModelSerializer):\n \"\"\"\n Serializer for a BuildAttachment\n \"\"\"\n\n attachment = InvenTreeAttachmentSerializerField(required=True)\n\n class Meta:\n model = BuildOrderAttachment\n\n fields = [\n 'pk',\n 'build',\n 'attachment',\n 'comment',\n 'upload_date',\n ]\n\n read_only_fields = [\n 'upload_date',\n ]\n", "path": "InvenTree/build/serializers.py"}, {"content": "\"\"\"\nJSON API for the Build app\n\"\"\"\n\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import filters\nfrom rest_framework import generics\n\nfrom django.conf.urls import url, include\n\nfrom InvenTree.api import AttachmentMixin\nfrom InvenTree.helpers import str2bool, isNull\nfrom InvenTree.status_codes import BuildStatus\n\nfrom .models import Build, BuildItem, BuildOrderAttachment\nfrom .serializers import BuildAttachmentSerializer, BuildSerializer, BuildItemSerializer\n\n\nclass BuildList(generics.ListCreateAPIView):\n \"\"\" API endpoint for accessing a list of Build objects.\n\n - GET: Return list of objects (with filters)\n - POST: Create a new Build object\n \"\"\"\n\n queryset = Build.objects.all()\n serializer_class = BuildSerializer\n\n filter_backends = [\n DjangoFilterBackend,\n filters.SearchFilter,\n filters.OrderingFilter,\n ]\n\n filter_fields = [\n 'sales_order',\n ]\n\n ordering_fields = [\n 'reference',\n 'part__name',\n 'status',\n 'creation_date',\n 'target_date',\n 'completion_date',\n 'quantity',\n ]\n\n search_fields = [\n 'reference',\n 'part__name',\n 'title',\n ]\n\n def get_queryset(self):\n \"\"\"\n Override the queryset filtering,\n as some of the fields don't natively play nicely with DRF\n \"\"\"\n\n queryset = super().get_queryset().prefetch_related('part')\n\n queryset = 
BuildSerializer.annotate_queryset(queryset)\n\n return queryset\n\n def filter_queryset(self, queryset):\n\n queryset = super().filter_queryset(queryset)\n\n params = self.request.query_params\n\n # Filter by \"parent\"\n parent = params.get('parent', None)\n\n if parent is not None:\n queryset = queryset.filter(parent=parent)\n\n # Filter by \"ancestor\" builds\n ancestor = params.get('ancestor', None)\n\n if ancestor is not None:\n try:\n ancestor = Build.objects.get(pk=ancestor)\n\n descendants = ancestor.get_descendants(include_self=True)\n\n queryset = queryset.filter(\n parent__pk__in=[b.pk for b in descendants]\n )\n\n except (ValueError, Build.DoesNotExist):\n pass\n\n # Filter by build status?\n status = params.get('status', None)\n\n if status is not None:\n queryset = queryset.filter(status=status)\n\n # Filter by \"pending\" status\n active = params.get('active', None)\n\n if active is not None:\n active = str2bool(active)\n\n if active:\n queryset = queryset.filter(status__in=BuildStatus.ACTIVE_CODES)\n else:\n queryset = queryset.exclude(status__in=BuildStatus.ACTIVE_CODES)\n\n # Filter by \"overdue\" status?\n overdue = params.get('overdue', None)\n\n if overdue is not None:\n overdue = str2bool(overdue)\n\n if overdue:\n queryset = queryset.filter(Build.OVERDUE_FILTER)\n else:\n queryset = queryset.exclude(Build.OVERDUE_FILTER)\n\n # Filter by associated part?\n part = params.get('part', None)\n\n if part is not None:\n queryset = queryset.filter(part=part)\n\n # Filter by 'date range'\n min_date = params.get('min_date', None)\n max_date = params.get('max_date', None)\n\n if min_date is not None and max_date is not None:\n queryset = Build.filterByDate(queryset, min_date, max_date)\n\n return queryset\n\n def get_serializer(self, *args, **kwargs):\n\n try:\n part_detail = str2bool(self.request.GET.get('part_detail', None))\n except AttributeError:\n part_detail = None\n\n kwargs['part_detail'] = part_detail\n\n return self.serializer_class(*args, **kwargs)\n\n\nclass BuildDetail(generics.RetrieveUpdateAPIView):\n \"\"\" API endpoint for detail view of a Build object \"\"\"\n\n queryset = Build.objects.all()\n serializer_class = BuildSerializer\n\n\nclass BuildItemList(generics.ListCreateAPIView):\n \"\"\" API endpoint for accessing a list of BuildItem objects\n\n - GET: Return list of objects\n - POST: Create a new BuildItem object\n \"\"\"\n\n serializer_class = BuildItemSerializer\n\n def get_serializer(self, *args, **kwargs):\n\n try:\n params = self.request.query_params\n\n kwargs['part_detail'] = str2bool(params.get('part_detail', False))\n kwargs['build_detail'] = str2bool(params.get('build_detail', False))\n kwargs['location_detail'] = str2bool(params.get('location_detail', False))\n except AttributeError:\n pass\n \n return self.serializer_class(*args, **kwargs)\n\n def get_queryset(self):\n \"\"\" Override the queryset method,\n to allow filtering by stock_item.part\n \"\"\"\n\n query = BuildItem.objects.all()\n\n query = query.select_related('stock_item')\n query = query.prefetch_related('stock_item__part')\n query = query.prefetch_related('stock_item__part__category')\n\n return query\n\n def filter_queryset(self, queryset):\n\n queryset = super().filter_queryset(queryset)\n\n params = self.request.query_params\n\n # Does the user wish to filter by part?\n part_pk = params.get('part', None)\n\n if part_pk:\n queryset = queryset.filter(stock_item__part=part_pk)\n\n # Filter by output target\n output = params.get('output', None)\n\n if output:\n\n if 
isNull(output):\n queryset = queryset.filter(install_into=None)\n else:\n queryset = queryset.filter(install_into=output)\n\n return queryset\n\n filter_backends = [\n DjangoFilterBackend,\n ]\n\n filter_fields = [\n 'build',\n 'stock_item',\n 'install_into',\n ]\n\n\nclass BuildAttachmentList(generics.ListCreateAPIView, AttachmentMixin):\n \"\"\"\n API endpoint for listing (and creating) BuildOrderAttachment objects\n \"\"\"\n\n queryset = BuildOrderAttachment.objects.all()\n serializer_class = BuildAttachmentSerializer\n\n\nclass BuildAttachmentDetail(generics.RetrieveUpdateDestroyAPIView, AttachmentMixin):\n \"\"\"\n Detail endpoint for a BuildOrderAttachment object\n \"\"\"\n\n queryset = BuildOrderAttachment.objects.all()\n serializer_class = BuildAttachmentSerializer\n\n\nbuild_api_urls = [\n\n # Attachments\n url(r'^attachment/', include([\n url(r'^(?P<pk>\\d+)/', BuildAttachmentDetail.as_view(), name='api-build-attachment-detail'),\n url('^.*$', BuildAttachmentList.as_view(), name='api-build-attachment-list'),\n ])),\n\n # Build Items\n url(r'^item/', include([\n url('^.*$', BuildItemList.as_view(), name='api-build-item-list')\n ])),\n\n # Build Detail\n url(r'^(?P<pk>\\d+)/', BuildDetail.as_view(), name='api-build-detail'),\n\n # Build List\n url(r'^.*$', BuildList.as_view(), name='api-build-list'),\n]\n", "path": "InvenTree/build/api.py"}], "after_files": [{"content": "\"\"\"\nJSON serializers for Build API\n\"\"\"\n\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db.models import Case, When, Value\nfrom django.db.models import BooleanField\n\nfrom rest_framework import serializers\n\nfrom InvenTree.serializers import InvenTreeModelSerializer, InvenTreeAttachmentSerializerField, UserSerializerBrief\n\nfrom stock.serializers import StockItemSerializerBrief\nfrom stock.serializers import LocationSerializer\nfrom part.serializers import PartSerializer, PartBriefSerializer\nfrom users.serializers import OwnerSerializer\n\nfrom .models import Build, BuildItem, BuildOrderAttachment\n\n\nclass BuildSerializer(InvenTreeModelSerializer):\n \"\"\" Serializes a Build object \"\"\"\n\n url = serializers.CharField(source='get_absolute_url', read_only=True)\n status_text = serializers.CharField(source='get_status_display', read_only=True)\n\n part_detail = PartBriefSerializer(source='part', many=False, read_only=True)\n\n quantity = serializers.FloatField()\n\n overdue = serializers.BooleanField(required=False, read_only=True)\n\n issued_by_detail = UserSerializerBrief(source='issued_by', read_only=True)\n\n responsible_detail = OwnerSerializer(source='responsible', read_only=True)\n\n @staticmethod\n def annotate_queryset(queryset):\n \"\"\"\n Add custom annotations to the BuildSerializer queryset,\n performing database queries as efficiently as possible.\n\n The following annoted fields are added:\n\n - overdue: True if the build is outstanding *and* the completion date has past\n\n \"\"\"\n\n # Annotate a boolean 'overdue' flag\n\n queryset = queryset.annotate(\n overdue=Case(\n When(\n Build.OVERDUE_FILTER, then=Value(True, output_field=BooleanField()),\n ),\n default=Value(False, output_field=BooleanField())\n )\n )\n\n return queryset\n\n def __init__(self, *args, **kwargs):\n part_detail = kwargs.pop('part_detail', False)\n\n super().__init__(*args, **kwargs)\n\n if part_detail is not True:\n self.fields.pop('part_detail')\n\n class Meta:\n model = Build\n fields = [\n 'pk',\n 'url',\n 'title',\n 'creation_date',\n 'completed',\n 'completion_date',\n 
'part',\n 'part_detail',\n 'overdue',\n 'reference',\n 'sales_order',\n 'quantity',\n 'status',\n 'status_text',\n 'target_date',\n 'notes',\n 'link',\n 'issued_by',\n 'issued_by_detail',\n 'responsible',\n 'responsible_detail',\n ]\n\n read_only_fields = [\n 'completed',\n 'creation_date',\n 'completion_data',\n 'status',\n 'status_text',\n ]\n\n\nclass BuildItemSerializer(InvenTreeModelSerializer):\n \"\"\" Serializes a BuildItem object \"\"\"\n\n bom_part = serializers.IntegerField(source='bom_item.sub_part.pk', read_only=True)\n part = serializers.IntegerField(source='stock_item.part.pk', read_only=True)\n location = serializers.IntegerField(source='stock_item.location.pk', read_only=True)\n\n # Extra (optional) detail fields\n part_detail = PartSerializer(source='stock_item.part', many=False, read_only=True)\n build_detail = BuildSerializer(source='build', many=False, read_only=True)\n stock_item_detail = StockItemSerializerBrief(source='stock_item', read_only=True)\n location_detail = LocationSerializer(source='stock_item.location', read_only=True)\n\n quantity = serializers.FloatField()\n\n def __init__(self, *args, **kwargs):\n\n build_detail = kwargs.pop('build_detail', False)\n part_detail = kwargs.pop('part_detail', False)\n location_detail = kwargs.pop('location_detail', False)\n\n super().__init__(*args, **kwargs)\n\n if not build_detail:\n self.fields.pop('build_detail')\n\n if not part_detail:\n self.fields.pop('part_detail')\n\n if not location_detail:\n self.fields.pop('location_detail')\n\n class Meta:\n model = BuildItem\n fields = [\n 'pk',\n 'bom_part',\n 'build',\n 'build_detail',\n 'install_into',\n 'location',\n 'location_detail',\n 'part',\n 'part_detail',\n 'stock_item',\n 'stock_item_detail',\n 'quantity'\n ]\n\n\nclass BuildAttachmentSerializer(InvenTreeModelSerializer):\n \"\"\"\n Serializer for a BuildAttachment\n \"\"\"\n\n attachment = InvenTreeAttachmentSerializerField(required=True)\n\n class Meta:\n model = BuildOrderAttachment\n\n fields = [\n 'pk',\n 'build',\n 'attachment',\n 'comment',\n 'upload_date',\n ]\n\n read_only_fields = [\n 'upload_date',\n ]\n", "path": "InvenTree/build/serializers.py"}, {"content": "\"\"\"\nJSON API for the Build app\n\"\"\"\n\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import filters\nfrom rest_framework import generics\n\nfrom django.conf.urls import url, include\n\nfrom InvenTree.api import AttachmentMixin\nfrom InvenTree.helpers import str2bool, isNull\nfrom InvenTree.status_codes import BuildStatus\n\nfrom .models import Build, BuildItem, BuildOrderAttachment\nfrom .serializers import BuildAttachmentSerializer, BuildSerializer, BuildItemSerializer\n\n\nclass BuildList(generics.ListCreateAPIView):\n \"\"\" API endpoint for accessing a list of Build objects.\n\n - GET: Return list of objects (with filters)\n - POST: Create a new Build object\n \"\"\"\n\n queryset = Build.objects.all()\n serializer_class = BuildSerializer\n\n filter_backends = [\n DjangoFilterBackend,\n filters.SearchFilter,\n filters.OrderingFilter,\n ]\n\n filter_fields = [\n 'sales_order',\n ]\n\n ordering_fields = [\n 'reference',\n 'part__name',\n 'status',\n 'creation_date',\n 'target_date',\n 'completion_date',\n 'quantity',\n 'issued_by',\n 'responsible',\n ]\n\n search_fields = [\n 'reference',\n 'part__name',\n 'title',\n ]\n\n def get_queryset(self):\n \"\"\"\n Override the queryset filtering,\n as some of the fields don't natively play 
nicely with DRF\n \"\"\"\n\n queryset = super().get_queryset().prefetch_related('part')\n\n queryset = BuildSerializer.annotate_queryset(queryset)\n\n return queryset\n\n def filter_queryset(self, queryset):\n\n queryset = super().filter_queryset(queryset)\n\n params = self.request.query_params\n\n # Filter by \"parent\"\n parent = params.get('parent', None)\n\n if parent is not None:\n queryset = queryset.filter(parent=parent)\n\n # Filter by \"ancestor\" builds\n ancestor = params.get('ancestor', None)\n\n if ancestor is not None:\n try:\n ancestor = Build.objects.get(pk=ancestor)\n\n descendants = ancestor.get_descendants(include_self=True)\n\n queryset = queryset.filter(\n parent__pk__in=[b.pk for b in descendants]\n )\n\n except (ValueError, Build.DoesNotExist):\n pass\n\n # Filter by build status?\n status = params.get('status', None)\n\n if status is not None:\n queryset = queryset.filter(status=status)\n\n # Filter by \"pending\" status\n active = params.get('active', None)\n\n if active is not None:\n active = str2bool(active)\n\n if active:\n queryset = queryset.filter(status__in=BuildStatus.ACTIVE_CODES)\n else:\n queryset = queryset.exclude(status__in=BuildStatus.ACTIVE_CODES)\n\n # Filter by \"overdue\" status?\n overdue = params.get('overdue', None)\n\n if overdue is not None:\n overdue = str2bool(overdue)\n\n if overdue:\n queryset = queryset.filter(Build.OVERDUE_FILTER)\n else:\n queryset = queryset.exclude(Build.OVERDUE_FILTER)\n\n # Filter by associated part?\n part = params.get('part', None)\n\n if part is not None:\n queryset = queryset.filter(part=part)\n\n # Filter by 'date range'\n min_date = params.get('min_date', None)\n max_date = params.get('max_date', None)\n\n if min_date is not None and max_date is not None:\n queryset = Build.filterByDate(queryset, min_date, max_date)\n\n return queryset\n\n def get_serializer(self, *args, **kwargs):\n\n try:\n part_detail = str2bool(self.request.GET.get('part_detail', None))\n except AttributeError:\n part_detail = None\n\n kwargs['part_detail'] = part_detail\n\n return self.serializer_class(*args, **kwargs)\n\n\nclass BuildDetail(generics.RetrieveUpdateAPIView):\n \"\"\" API endpoint for detail view of a Build object \"\"\"\n\n queryset = Build.objects.all()\n serializer_class = BuildSerializer\n\n\nclass BuildItemList(generics.ListCreateAPIView):\n \"\"\" API endpoint for accessing a list of BuildItem objects\n\n - GET: Return list of objects\n - POST: Create a new BuildItem object\n \"\"\"\n\n serializer_class = BuildItemSerializer\n\n def get_serializer(self, *args, **kwargs):\n\n try:\n params = self.request.query_params\n\n kwargs['part_detail'] = str2bool(params.get('part_detail', False))\n kwargs['build_detail'] = str2bool(params.get('build_detail', False))\n kwargs['location_detail'] = str2bool(params.get('location_detail', False))\n except AttributeError:\n pass\n \n return self.serializer_class(*args, **kwargs)\n\n def get_queryset(self):\n \"\"\" Override the queryset method,\n to allow filtering by stock_item.part\n \"\"\"\n\n query = BuildItem.objects.all()\n\n query = query.select_related('stock_item')\n query = query.prefetch_related('stock_item__part')\n query = query.prefetch_related('stock_item__part__category')\n\n return query\n\n def filter_queryset(self, queryset):\n\n queryset = super().filter_queryset(queryset)\n\n params = self.request.query_params\n\n # Does the user wish to filter by part?\n part_pk = params.get('part', None)\n\n if part_pk:\n queryset = 
queryset.filter(stock_item__part=part_pk)\n\n # Filter by output target\n output = params.get('output', None)\n\n if output:\n\n if isNull(output):\n queryset = queryset.filter(install_into=None)\n else:\n queryset = queryset.filter(install_into=output)\n\n return queryset\n\n filter_backends = [\n DjangoFilterBackend,\n ]\n\n filter_fields = [\n 'build',\n 'stock_item',\n 'install_into',\n ]\n\n\nclass BuildAttachmentList(generics.ListCreateAPIView, AttachmentMixin):\n \"\"\"\n API endpoint for listing (and creating) BuildOrderAttachment objects\n \"\"\"\n\n queryset = BuildOrderAttachment.objects.all()\n serializer_class = BuildAttachmentSerializer\n\n\nclass BuildAttachmentDetail(generics.RetrieveUpdateDestroyAPIView, AttachmentMixin):\n \"\"\"\n Detail endpoint for a BuildOrderAttachment object\n \"\"\"\n\n queryset = BuildOrderAttachment.objects.all()\n serializer_class = BuildAttachmentSerializer\n\n\nbuild_api_urls = [\n\n # Attachments\n url(r'^attachment/', include([\n url(r'^(?P<pk>\\d+)/', BuildAttachmentDetail.as_view(), name='api-build-attachment-detail'),\n url('^.*$', BuildAttachmentList.as_view(), name='api-build-attachment-list'),\n ])),\n\n # Build Items\n url(r'^item/', include([\n url('^.*$', BuildItemList.as_view(), name='api-build-item-list')\n ])),\n\n # Build Detail\n url(r'^(?P<pk>\\d+)/', BuildDetail.as_view(), name='api-build-detail'),\n\n # Build List\n url(r'^.*$', BuildList.as_view(), name='api-build-list'),\n]\n", "path": "InvenTree/build/api.py"}]}
| 3,894 | 389 |
gh_patches_debug_37732
|
rasdani/github-patches
|
git_diff
|
mars-project__mars-2150
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move the metadata into setup.cfg
https://github.com/gvalkov/setuptools-py2cfg can be helpful.
--- END ISSUE ---
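For orientation, the static arguments currently passed to `setup()` translate into a declarative `[metadata]` section along the lines sketched below. This is an illustration built from the values visible in `setup.py`, not the project's actual `setup.cfg` (the e-mail values appear redacted in the listing below and are copied verbatim).

```cfg
[metadata]
name = pymars
description = MARS: a tensor-based unified framework for large-scale data computation.
long_description = file: README.rst
long_description_content_type = text/x-rst
author = Qin Xuye
author_email = [email protected]
maintainer = Qin Xuye
maintainer_email = [email protected]
url = http://github.com/mars-project/mars
license = Apache License 2.0
classifiers =
    Operating System :: OS Independent
    Programming Language :: Python
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3.6
    Programming Language :: Python :: 3.7
    Programming Language :: Python :: 3.8
    Programming Language :: Python :: Implementation :: CPython
    Topic :: Software Development :: Libraries
```

Note that `long_description = file: README.rst` replaces the manual file read in `setup.py`; the `file:` directive is resolved by setuptools at build time.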
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 1999-2020 Alibaba Group Holding Ltd.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import platform
17 import re
18 import sys
19 from setuptools import setup, find_packages, Extension
20 from distutils.sysconfig import get_config_var
21 from distutils.version import LooseVersion
22
23 import numpy as np
24 from Cython.Build import cythonize
25
26 try:
27 import distutils.ccompiler
28 if sys.platform != 'win32':
29 from numpy.distutils.ccompiler import CCompiler_compile
30 distutils.ccompiler.CCompiler.compile = CCompiler_compile
31 except ImportError:
32 pass
33
34 # From https://github.com/pandas-dev/pandas/pull/24274:
35 # For mac, ensure extensions are built for macos 10.9 when compiling on a
36 # 10.9 system or above, overriding distuitls behaviour which is to target
37 # the version that python was built for. This may be overridden by setting
38 # MACOSX_DEPLOYMENT_TARGET before calling setup.py
39 if sys.platform == 'darwin':
40 if 'MACOSX_DEPLOYMENT_TARGET' not in os.environ:
41 current_system = LooseVersion(platform.mac_ver()[0])
42 python_target = LooseVersion(
43 get_config_var('MACOSX_DEPLOYMENT_TARGET'))
44 if python_target < '10.9' and current_system >= '10.9':
45 os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9'
46
47
48 repo_root = os.path.dirname(os.path.abspath(__file__))
49
50
51 def execfile(fname, globs, locs=None):
52 locs = locs or globs
53 exec(compile(open(fname).read(), fname, "exec"), globs, locs)
54
55
56 version_file_path = os.path.join(repo_root, 'mars', '_version.py')
57 version_ns = {'__file__': version_file_path}
58 execfile(version_file_path, version_ns)
59 version = version_ns['__version__']
60 # check version vs tag
61 if os.environ.get('GIT_TAG') and re.search(r'v\d', os.environ['GIT_TAG']) \
62 and os.environ['GIT_TAG'] != 'v' + version:
63 raise ValueError('Tag %r does not match source version %r'
64 % (os.environ['GIT_TAG'], version))
65
66 requirements = []
67 with open(os.path.join(repo_root, 'requirements.txt'), 'r') as f:
68 requirements.extend(f.read().splitlines())
69
70 extra_requirements = []
71 with open(os.path.join(repo_root, 'requirements-extra.txt'), 'r') as f:
72 extra_requirements.extend(f.read().splitlines())
73
74 dev_requirements = []
75 with open(os.path.join(repo_root, 'requirements-dev.txt'), 'r') as f:
76 dev_requirements.extend(f.read().splitlines())
77
78 vineyard_requirements = []
79 with open(os.path.join(repo_root, 'requirements-vineyard.txt'), 'r') as f:
80 vineyard_requirements.extend(f.read().splitlines())
81
82 long_description = None
83 if os.path.exists(os.path.join(repo_root, 'README.rst')):
84 with open(os.path.join(repo_root, 'README.rst'), encoding='utf-8') as f:
85 long_description = f.read()
86
87
88 if os.path.exists(os.path.join(repo_root, '.git')):
89 git_info = version_ns['get_git_info']()
90 if git_info:
91 with open(os.path.join(repo_root, 'mars', '.git-branch'), 'w') as git_file:
92 git_file.write(' '.join(git_info))
93
94 cythonize_kw = dict(language_level=sys.version_info[0])
95 cy_extension_kw = dict()
96 if os.environ.get('CYTHON_TRACE'):
97 cy_extension_kw['define_macros'] = [('CYTHON_TRACE_NOGIL', '1'), ('CYTHON_TRACE', '1')]
98 cythonize_kw['compiler_directives'] = {'linetrace': True}
99
100 if 'MSC' in sys.version:
101 extra_compile_args = ['/Ot', '/I' + os.path.join(repo_root, 'misc')]
102 cy_extension_kw['extra_compile_args'] = extra_compile_args
103 else:
104 extra_compile_args = ['-O3']
105 cy_extension_kw['extra_compile_args'] = extra_compile_args
106
107
108 def _discover_pyx():
109 exts = dict()
110 for root, _, files in os.walk(os.path.join(repo_root, 'mars')):
111 for fn in files:
112 if not fn.endswith('.pyx'):
113 continue
114 full_fn = os.path.relpath(os.path.join(root, fn), repo_root)
115 mod_name = full_fn.replace('.pyx', '').replace(os.path.sep, '.')
116 exts[mod_name] = Extension(mod_name, [full_fn], **cy_extension_kw)
117 return exts
118
119
120 cy_extension_kw['include_dirs'] = [np.get_include()]
121 extensions_dict = _discover_pyx()
122 cy_extensions = list(extensions_dict.values())
123
124 extensions = cythonize(cy_extensions, **cythonize_kw) + \
125 [Extension('mars.lib.mmh3', ['mars/lib/mmh3_src/mmh3module.cpp', 'mars/lib/mmh3_src/MurmurHash3.cpp'])]
126
127
128 setup_options = dict(
129 name='pymars',
130 version=version,
131 description='MARS: a tensor-based unified framework for large-scale data computation.',
132 long_description=long_description,
133 long_description_content_type='text/x-rst',
134 author='Qin Xuye',
135 author_email='[email protected]',
136 maintainer='Qin Xuye',
137 maintainer_email='[email protected]',
138 url='http://github.com/mars-project/mars',
139 license='Apache License 2.0',
140 classifiers=[
141 'Operating System :: OS Independent',
142 'Programming Language :: Python',
143 'Programming Language :: Python :: 3',
144 'Programming Language :: Python :: 3.6',
145 'Programming Language :: Python :: 3.7',
146 'Programming Language :: Python :: 3.8',
147 'Programming Language :: Python :: Implementation :: CPython',
148 'Topic :: Software Development :: Libraries',
149 ],
150 packages=find_packages(exclude=('*.tests.*', '*.tests')),
151 include_package_data=True,
152 entry_points={'console_scripts': [
153 'mars-scheduler = mars.scheduler.__main__:main',
154 'mars-worker = mars.worker.__main__:main',
155 'mars-web = mars.web.__main__:main',
156 ]},
157 python_requires='>=3.6',
158 install_requires=requirements,
159 ext_modules=extensions,
160 extras_require={
161 'distributed': extra_requirements,
162 'dev': extra_requirements + dev_requirements,
163 'vineyard': vineyard_requirements,
164 }
165 )
166 setup(**setup_options)
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@
import platform
import re
import sys
-from setuptools import setup, find_packages, Extension
+from setuptools import setup, Extension
from distutils.sysconfig import get_config_var
from distutils.version import LooseVersion
@@ -63,27 +63,6 @@
raise ValueError('Tag %r does not match source version %r'
% (os.environ['GIT_TAG'], version))
-requirements = []
-with open(os.path.join(repo_root, 'requirements.txt'), 'r') as f:
- requirements.extend(f.read().splitlines())
-
-extra_requirements = []
-with open(os.path.join(repo_root, 'requirements-extra.txt'), 'r') as f:
- extra_requirements.extend(f.read().splitlines())
-
-dev_requirements = []
-with open(os.path.join(repo_root, 'requirements-dev.txt'), 'r') as f:
- dev_requirements.extend(f.read().splitlines())
-
-vineyard_requirements = []
-with open(os.path.join(repo_root, 'requirements-vineyard.txt'), 'r') as f:
- vineyard_requirements.extend(f.read().splitlines())
-
-long_description = None
-if os.path.exists(os.path.join(repo_root, 'README.rst')):
- with open(os.path.join(repo_root, 'README.rst'), encoding='utf-8') as f:
- long_description = f.read()
-
if os.path.exists(os.path.join(repo_root, '.git')):
git_info = version_ns['get_git_info']()
@@ -126,41 +105,7 @@
setup_options = dict(
- name='pymars',
version=version,
- description='MARS: a tensor-based unified framework for large-scale data computation.',
- long_description=long_description,
- long_description_content_type='text/x-rst',
- author='Qin Xuye',
- author_email='[email protected]',
- maintainer='Qin Xuye',
- maintainer_email='[email protected]',
- url='http://github.com/mars-project/mars',
- license='Apache License 2.0',
- classifiers=[
- 'Operating System :: OS Independent',
- 'Programming Language :: Python',
- 'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.6',
- 'Programming Language :: Python :: 3.7',
- 'Programming Language :: Python :: 3.8',
- 'Programming Language :: Python :: Implementation :: CPython',
- 'Topic :: Software Development :: Libraries',
- ],
- packages=find_packages(exclude=('*.tests.*', '*.tests')),
- include_package_data=True,
- entry_points={'console_scripts': [
- 'mars-scheduler = mars.scheduler.__main__:main',
- 'mars-worker = mars.worker.__main__:main',
- 'mars-web = mars.web.__main__:main',
- ]},
- python_requires='>=3.6',
- install_requires=requirements,
ext_modules=extensions,
- extras_require={
- 'distributed': extra_requirements,
- 'dev': extra_requirements + dev_requirements,
- 'vineyard': vineyard_requirements,
- }
)
setup(**setup_options)
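The golden diff removes the declarative arguments and leaves `setup.py` with only `version` and `ext_modules`, which have to stay imperative because of the git-tag check and the Cython build. The packaging options that disappeared would live in `setup.cfg` next to the `[metadata]` section sketched earlier. A hedged outline follows, with the dependency lists reduced to placeholders because `setup.cfg` cannot simply re-read the `requirements*.txt` files the way the old `setup.py` did.

```cfg
[options]
packages = find:
include_package_data = True
python_requires = >=3.6
# Placeholder entries: the real list is whatever requirements.txt pins.
install_requires =
    numpy
    pandas

[options.packages.find]
exclude =
    *.tests.*
    *.tests

[options.entry_points]
console_scripts =
    mars-scheduler = mars.scheduler.__main__:main
    mars-worker = mars.worker.__main__:main
    mars-web = mars.web.__main__:main

[options.extras_require]
# Placeholders mirroring requirements-extra.txt, requirements-dev.txt and requirements-vineyard.txt.
distributed =
dev =
vineyard =
```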
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -16,7 +16,7 @@\n import platform\n import re\n import sys\n-from setuptools import setup, find_packages, Extension\n+from setuptools import setup, Extension\n from distutils.sysconfig import get_config_var\n from distutils.version import LooseVersion\n \n@@ -63,27 +63,6 @@\n raise ValueError('Tag %r does not match source version %r'\n % (os.environ['GIT_TAG'], version))\n \n-requirements = []\n-with open(os.path.join(repo_root, 'requirements.txt'), 'r') as f:\n- requirements.extend(f.read().splitlines())\n-\n-extra_requirements = []\n-with open(os.path.join(repo_root, 'requirements-extra.txt'), 'r') as f:\n- extra_requirements.extend(f.read().splitlines())\n-\n-dev_requirements = []\n-with open(os.path.join(repo_root, 'requirements-dev.txt'), 'r') as f:\n- dev_requirements.extend(f.read().splitlines())\n-\n-vineyard_requirements = []\n-with open(os.path.join(repo_root, 'requirements-vineyard.txt'), 'r') as f:\n- vineyard_requirements.extend(f.read().splitlines())\n-\n-long_description = None\n-if os.path.exists(os.path.join(repo_root, 'README.rst')):\n- with open(os.path.join(repo_root, 'README.rst'), encoding='utf-8') as f:\n- long_description = f.read()\n-\n \n if os.path.exists(os.path.join(repo_root, '.git')):\n git_info = version_ns['get_git_info']()\n@@ -126,41 +105,7 @@\n \n \n setup_options = dict(\n- name='pymars',\n version=version,\n- description='MARS: a tensor-based unified framework for large-scale data computation.',\n- long_description=long_description,\n- long_description_content_type='text/x-rst',\n- author='Qin Xuye',\n- author_email='[email protected]',\n- maintainer='Qin Xuye',\n- maintainer_email='[email protected]',\n- url='http://github.com/mars-project/mars',\n- license='Apache License 2.0',\n- classifiers=[\n- 'Operating System :: OS Independent',\n- 'Programming Language :: Python',\n- 'Programming Language :: Python :: 3',\n- 'Programming Language :: Python :: 3.6',\n- 'Programming Language :: Python :: 3.7',\n- 'Programming Language :: Python :: 3.8',\n- 'Programming Language :: Python :: Implementation :: CPython',\n- 'Topic :: Software Development :: Libraries',\n- ],\n- packages=find_packages(exclude=('*.tests.*', '*.tests')),\n- include_package_data=True,\n- entry_points={'console_scripts': [\n- 'mars-scheduler = mars.scheduler.__main__:main',\n- 'mars-worker = mars.worker.__main__:main',\n- 'mars-web = mars.web.__main__:main',\n- ]},\n- python_requires='>=3.6',\n- install_requires=requirements,\n ext_modules=extensions,\n- extras_require={\n- 'distributed': extra_requirements,\n- 'dev': extra_requirements + dev_requirements,\n- 'vineyard': vineyard_requirements,\n- }\n )\n setup(**setup_options)\n", "issue": "Move the metadata into setup.cfg\nhttps://github.com/gvalkov/setuptools-py2cfg can be helpful.\n", "before_files": [{"content": "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport platform\nimport 
re\nimport sys\nfrom setuptools import setup, find_packages, Extension\nfrom distutils.sysconfig import get_config_var\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nfrom Cython.Build import cythonize\n\ntry:\n import distutils.ccompiler\n if sys.platform != 'win32':\n from numpy.distutils.ccompiler import CCompiler_compile\n distutils.ccompiler.CCompiler.compile = CCompiler_compile\nexcept ImportError:\n pass\n\n# From https://github.com/pandas-dev/pandas/pull/24274:\n# For mac, ensure extensions are built for macos 10.9 when compiling on a\n# 10.9 system or above, overriding distuitls behaviour which is to target\n# the version that python was built for. This may be overridden by setting\n# MACOSX_DEPLOYMENT_TARGET before calling setup.py\nif sys.platform == 'darwin':\n if 'MACOSX_DEPLOYMENT_TARGET' not in os.environ:\n current_system = LooseVersion(platform.mac_ver()[0])\n python_target = LooseVersion(\n get_config_var('MACOSX_DEPLOYMENT_TARGET'))\n if python_target < '10.9' and current_system >= '10.9':\n os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9'\n\n\nrepo_root = os.path.dirname(os.path.abspath(__file__))\n\n\ndef execfile(fname, globs, locs=None):\n locs = locs or globs\n exec(compile(open(fname).read(), fname, \"exec\"), globs, locs)\n\n\nversion_file_path = os.path.join(repo_root, 'mars', '_version.py')\nversion_ns = {'__file__': version_file_path}\nexecfile(version_file_path, version_ns)\nversion = version_ns['__version__']\n# check version vs tag\nif os.environ.get('GIT_TAG') and re.search(r'v\\d', os.environ['GIT_TAG']) \\\n and os.environ['GIT_TAG'] != 'v' + version:\n raise ValueError('Tag %r does not match source version %r'\n % (os.environ['GIT_TAG'], version))\n\nrequirements = []\nwith open(os.path.join(repo_root, 'requirements.txt'), 'r') as f:\n requirements.extend(f.read().splitlines())\n\nextra_requirements = []\nwith open(os.path.join(repo_root, 'requirements-extra.txt'), 'r') as f:\n extra_requirements.extend(f.read().splitlines())\n\ndev_requirements = []\nwith open(os.path.join(repo_root, 'requirements-dev.txt'), 'r') as f:\n dev_requirements.extend(f.read().splitlines())\n\nvineyard_requirements = []\nwith open(os.path.join(repo_root, 'requirements-vineyard.txt'), 'r') as f:\n vineyard_requirements.extend(f.read().splitlines())\n\nlong_description = None\nif os.path.exists(os.path.join(repo_root, 'README.rst')):\n with open(os.path.join(repo_root, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\n\nif os.path.exists(os.path.join(repo_root, '.git')):\n git_info = version_ns['get_git_info']()\n if git_info:\n with open(os.path.join(repo_root, 'mars', '.git-branch'), 'w') as git_file:\n git_file.write(' '.join(git_info))\n\ncythonize_kw = dict(language_level=sys.version_info[0])\ncy_extension_kw = dict()\nif os.environ.get('CYTHON_TRACE'):\n cy_extension_kw['define_macros'] = [('CYTHON_TRACE_NOGIL', '1'), ('CYTHON_TRACE', '1')]\n cythonize_kw['compiler_directives'] = {'linetrace': True}\n\nif 'MSC' in sys.version:\n extra_compile_args = ['/Ot', '/I' + os.path.join(repo_root, 'misc')]\n cy_extension_kw['extra_compile_args'] = extra_compile_args\nelse:\n extra_compile_args = ['-O3']\n cy_extension_kw['extra_compile_args'] = extra_compile_args\n\n\ndef _discover_pyx():\n exts = dict()\n for root, _, files in os.walk(os.path.join(repo_root, 'mars')):\n for fn in files:\n if not fn.endswith('.pyx'):\n continue\n full_fn = os.path.relpath(os.path.join(root, fn), repo_root)\n mod_name = full_fn.replace('.pyx', 
'').replace(os.path.sep, '.')\n exts[mod_name] = Extension(mod_name, [full_fn], **cy_extension_kw)\n return exts\n\n\ncy_extension_kw['include_dirs'] = [np.get_include()]\nextensions_dict = _discover_pyx()\ncy_extensions = list(extensions_dict.values())\n\nextensions = cythonize(cy_extensions, **cythonize_kw) + \\\n [Extension('mars.lib.mmh3', ['mars/lib/mmh3_src/mmh3module.cpp', 'mars/lib/mmh3_src/MurmurHash3.cpp'])]\n\n\nsetup_options = dict(\n name='pymars',\n version=version,\n description='MARS: a tensor-based unified framework for large-scale data computation.',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n author='Qin Xuye',\n author_email='[email protected]',\n maintainer='Qin Xuye',\n maintainer_email='[email protected]',\n url='http://github.com/mars-project/mars',\n license='Apache License 2.0',\n classifiers=[\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Software Development :: Libraries',\n ],\n packages=find_packages(exclude=('*.tests.*', '*.tests')),\n include_package_data=True,\n entry_points={'console_scripts': [\n 'mars-scheduler = mars.scheduler.__main__:main',\n 'mars-worker = mars.worker.__main__:main',\n 'mars-web = mars.web.__main__:main',\n ]},\n python_requires='>=3.6',\n install_requires=requirements,\n ext_modules=extensions,\n extras_require={\n 'distributed': extra_requirements,\n 'dev': extra_requirements + dev_requirements,\n 'vineyard': vineyard_requirements,\n }\n)\nsetup(**setup_options)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport platform\nimport re\nimport sys\nfrom setuptools import setup, Extension\nfrom distutils.sysconfig import get_config_var\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nfrom Cython.Build import cythonize\n\ntry:\n import distutils.ccompiler\n if sys.platform != 'win32':\n from numpy.distutils.ccompiler import CCompiler_compile\n distutils.ccompiler.CCompiler.compile = CCompiler_compile\nexcept ImportError:\n pass\n\n# From https://github.com/pandas-dev/pandas/pull/24274:\n# For mac, ensure extensions are built for macos 10.9 when compiling on a\n# 10.9 system or above, overriding distuitls behaviour which is to target\n# the version that python was built for. 
This may be overridden by setting\n# MACOSX_DEPLOYMENT_TARGET before calling setup.py\nif sys.platform == 'darwin':\n if 'MACOSX_DEPLOYMENT_TARGET' not in os.environ:\n current_system = LooseVersion(platform.mac_ver()[0])\n python_target = LooseVersion(\n get_config_var('MACOSX_DEPLOYMENT_TARGET'))\n if python_target < '10.9' and current_system >= '10.9':\n os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9'\n\n\nrepo_root = os.path.dirname(os.path.abspath(__file__))\n\n\ndef execfile(fname, globs, locs=None):\n locs = locs or globs\n exec(compile(open(fname).read(), fname, \"exec\"), globs, locs)\n\n\nversion_file_path = os.path.join(repo_root, 'mars', '_version.py')\nversion_ns = {'__file__': version_file_path}\nexecfile(version_file_path, version_ns)\nversion = version_ns['__version__']\n# check version vs tag\nif os.environ.get('GIT_TAG') and re.search(r'v\\d', os.environ['GIT_TAG']) \\\n and os.environ['GIT_TAG'] != 'v' + version:\n raise ValueError('Tag %r does not match source version %r'\n % (os.environ['GIT_TAG'], version))\n\n\nif os.path.exists(os.path.join(repo_root, '.git')):\n git_info = version_ns['get_git_info']()\n if git_info:\n with open(os.path.join(repo_root, 'mars', '.git-branch'), 'w') as git_file:\n git_file.write(' '.join(git_info))\n\ncythonize_kw = dict(language_level=sys.version_info[0])\ncy_extension_kw = dict()\nif os.environ.get('CYTHON_TRACE'):\n cy_extension_kw['define_macros'] = [('CYTHON_TRACE_NOGIL', '1'), ('CYTHON_TRACE', '1')]\n cythonize_kw['compiler_directives'] = {'linetrace': True}\n\nif 'MSC' in sys.version:\n extra_compile_args = ['/Ot', '/I' + os.path.join(repo_root, 'misc')]\n cy_extension_kw['extra_compile_args'] = extra_compile_args\nelse:\n extra_compile_args = ['-O3']\n cy_extension_kw['extra_compile_args'] = extra_compile_args\n\n\ndef _discover_pyx():\n exts = dict()\n for root, _, files in os.walk(os.path.join(repo_root, 'mars')):\n for fn in files:\n if not fn.endswith('.pyx'):\n continue\n full_fn = os.path.relpath(os.path.join(root, fn), repo_root)\n mod_name = full_fn.replace('.pyx', '').replace(os.path.sep, '.')\n exts[mod_name] = Extension(mod_name, [full_fn], **cy_extension_kw)\n return exts\n\n\ncy_extension_kw['include_dirs'] = [np.get_include()]\nextensions_dict = _discover_pyx()\ncy_extensions = list(extensions_dict.values())\n\nextensions = cythonize(cy_extensions, **cythonize_kw) + \\\n [Extension('mars.lib.mmh3', ['mars/lib/mmh3_src/mmh3module.cpp', 'mars/lib/mmh3_src/MurmurHash3.cpp'])]\n\n\nsetup_options = dict(\n version=version,\n ext_modules=extensions,\n)\nsetup(**setup_options)\n", "path": "setup.py"}]}
| 2,211 | 720 |
gh_patches_debug_29502
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-618
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement filtering options for Boolean types
## Problem
We need to ensure that records that include columns of Boolean type support the following filters via API:
- is {true/false}
- is not {true/false}
- is empty
- is not empty
This may already be done; please verify that it works and add some test cases specifically for Boolean data types.
This involves:
- Implementing the filters in the backend
- Updating the `/api/v0/databases/<id>/types/` endpoint to store available filters on this type
- Filter information should include the number of parameters needing to be passed in (e.g. `is` needs 1 parameter, `is empty` needs 0)
## Additional context
- We're using our fork of `sqlalchemy-filters` to provide filtering. See: https://github.com/centerofci/sqlalchemy-filters.
- See also: #387
- #557 provides some context on storing filters.
--- END ISSUE ---
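Concretely, in the dict-based filter specification that the forked `sqlalchemy-filters` consumes (and that the records endpoint accepts through its `filters` JSON parameter), the four Boolean filters above would look roughly as follows. The column name `is_active` is invented for the example, and `ne`/`is_not_null` are the standard sqlalchemy-filters operator names assumed to cover the negated cases.

```python
# Hypothetical boolean column "is_active"; operator names follow sqlalchemy-filters.
is_true = {"field": "is_active", "op": "eq", "value": True}        # is true (1 parameter)
is_not_true = {"field": "is_active", "op": "ne", "value": True}    # is not true (1 parameter)
is_empty = {"field": "is_active", "op": "is_null"}                 # is empty (0 parameters)
is_not_empty = {"field": "is_active", "op": "is_not_null"}         # is not empty (0 parameters)

# These dicts are what a client would pass (JSON-encoded) in the `filters`
# parameter of a records request, per RecordListParameterSerializer below.
```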
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/serializers.py`
Content:
```
1 import requests
2
3 from django.urls import reverse
4 from rest_framework import serializers
5 from rest_framework.exceptions import ValidationError
6 from rest_framework.fields import empty
7 from rest_framework.settings import api_settings
8
9 from mathesar.models import Table, Schema, DataFile, Database, Constraint
10
11
12 SUPPORTED_URL_CONTENT_TYPES = {'text/csv', 'text/plain'}
13
14
15 class ModelNameField(serializers.CharField):
16 """
17 De-serializes the request field as a string, but serializes the response field as
18 `model.name`. Required to support passing and returing a model name from the
19 endpoint, while also storing the model as a related field.
20 """
21 def to_representation(self, value):
22 return value.name
23
24
25 class InputValueField(serializers.CharField):
26 """
27 Takes in an arbitrary value. Use to emulate our column and record creation and
28 update endpoints, which handle arbitrary data pulled from request.data
29 """
30 def to_internal_value(self, data):
31 return data
32
33 def to_representation(self, value):
34 return value
35
36
37 class NestedTableSerializer(serializers.HyperlinkedModelSerializer):
38 url = serializers.SerializerMethodField()
39
40 class Meta:
41 model = Table
42 fields = ['id', 'name', 'url']
43
44 def get_url(self, obj):
45 request = self.context['request']
46 return request.build_absolute_uri(reverse('table-detail', kwargs={'pk': obj.pk}))
47
48
49 class SchemaSerializer(serializers.HyperlinkedModelSerializer):
50 name = serializers.CharField()
51 database = ModelNameField(max_length=128)
52
53 class Meta:
54 model = Schema
55 fields = ['id', 'name', 'database', 'has_dependencies']
56
57
58 class TypeOptionSerializer(serializers.Serializer):
59 precision = serializers.IntegerField(required=False)
60 scale = serializers.IntegerField(required=False)
61
62 def run_validation(self, data=empty):
63 # Ensure that there are no unknown type options passed in.
64 if data is not empty:
65 unknown = set(data) - set(self.fields)
66 if unknown:
67 errors = ['Unknown field: {}'.format(field) for field in unknown]
68 raise serializers.ValidationError({
69 api_settings.NON_FIELD_ERRORS_KEY: errors,
70 })
71
72 return super(TypeOptionSerializer, self).run_validation(data)
73
74
75 class SimpleColumnSerializer(serializers.Serializer):
76 name = serializers.CharField()
77 type = serializers.CharField(source='plain_type')
78 type_options = TypeOptionSerializer(required=False, allow_null=True)
79
80
81 class ColumnSerializer(SimpleColumnSerializer):
82 name = serializers.CharField(required=False)
83
84 # From scratch fields
85 type = serializers.CharField(source='plain_type', required=False)
86 nullable = serializers.BooleanField(default=True)
87 primary_key = serializers.BooleanField(default=False)
88
89 # From duplication fields
90 source_column = serializers.IntegerField(required=False, write_only=True)
91 copy_source_data = serializers.BooleanField(default=True, write_only=True)
92 copy_source_constraints = serializers.BooleanField(default=True, write_only=True)
93
94 # Read only fields
95 index = serializers.IntegerField(source='column_index', read_only=True)
96 valid_target_types = serializers.ListField(read_only=True)
97 default = InputValueField(
98 source='default_value', read_only=False, default=None, allow_null=True
99 )
100
101 def validate(self, data):
102 if not self.partial:
103 from_scratch_required_fields = ['name', 'type']
104 from_scratch_specific_fields = ['type', 'nullable', 'primary_key']
105 from_dupe_required_fields = ['source_column']
106 from_dupe_specific_fields = ['source_column', 'copy_source_data',
107 'copy_source_constraints']
108
109 # Note that we run validation on self.initial_data, as `data` has defaults
110 # filled in for fields that weren't specified by the request
111 from_scratch_required_all = all([
112 f in self.initial_data for f in from_scratch_required_fields
113 ])
114 from_scratch_specific_in = [
115 f for f in from_scratch_specific_fields if f in self.initial_data
116 ]
117 from_dupe_required_all = all([
118 f in self.initial_data for f in from_dupe_required_fields
119 ])
120 from_dupe_specific_in = [
121 f for f in from_dupe_specific_fields if f in self.initial_data
122 ]
123
124 if len(from_dupe_specific_in) and len(from_scratch_specific_in):
125 raise ValidationError(
126 f'{from_scratch_specific_in} cannot be passed in if '
127 f'{from_dupe_specific_in} has also been passed in.'
128 )
129 elif not from_dupe_required_all and not from_scratch_required_all:
130 # We default to from scratch required fields if no fields are passed
131 if len(from_dupe_specific_in) and not len(from_scratch_specific_in):
132 required_fields = from_dupe_required_fields
133 else:
134 required_fields = from_scratch_required_fields
135 raise ValidationError({
136 f: ['This field is required.']
137 for f in required_fields
138 if f not in self.initial_data
139 })
140 return data
141
142
143 class TableSerializer(serializers.ModelSerializer):
144 columns = SimpleColumnSerializer(many=True, source='sa_columns', required=False)
145 records_url = serializers.SerializerMethodField()
146 constraints_url = serializers.SerializerMethodField()
147 columns_url = serializers.SerializerMethodField()
148 type_suggestions_url = serializers.SerializerMethodField()
149 previews_url = serializers.SerializerMethodField()
150 name = serializers.CharField(required=False, allow_blank=True, default='')
151 data_files = serializers.PrimaryKeyRelatedField(
152 required=False, many=True, queryset=DataFile.objects.all()
153 )
154
155 class Meta:
156 model = Table
157 fields = ['id', 'name', 'schema', 'created_at', 'updated_at', 'import_verified',
158 'columns', 'records_url', 'constraints_url', 'columns_url',
159 'type_suggestions_url', 'previews_url', 'data_files',
160 'has_dependencies']
161
162 def get_records_url(self, obj):
163 if isinstance(obj, Table):
164 # Only get records if we are serializing an existing table
165 request = self.context['request']
166 return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))
167 else:
168 return None
169
170 def get_constraints_url(self, obj):
171 if isinstance(obj, Table):
172 # Only get constraints if we are serializing an existing table
173 request = self.context['request']
174 return request.build_absolute_uri(reverse('table-constraint-list', kwargs={'table_pk': obj.pk}))
175 else:
176 return None
177
178 def get_columns_url(self, obj):
179 if isinstance(obj, Table):
180 # Only get columns if we are serializing an existing table
181 request = self.context['request']
182 return request.build_absolute_uri(reverse('table-column-list', kwargs={'table_pk': obj.pk}))
183 else:
184 return None
185
186 def get_type_suggestions_url(self, obj):
187 if isinstance(obj, Table):
188 # Only get type suggestions if we are serializing an existing table
189 request = self.context['request']
190 return request.build_absolute_uri(reverse('table-type-suggestions', kwargs={'pk': obj.pk}))
191 else:
192 return None
193
194 def get_previews_url(self, obj):
195 if isinstance(obj, Table):
196 # Only get previews if we are serializing an existing table
197 request = self.context['request']
198 return request.build_absolute_uri(reverse('table-previews', kwargs={'pk': obj.pk}))
199 else:
200 return None
201
202 def validate_data_files(self, data_files):
203 if data_files and len(data_files) > 1:
204 raise ValidationError('Multiple data files are unsupported.')
205 return data_files
206
207
208 class RecordSerializer(serializers.BaseSerializer):
209 def to_representation(self, instance):
210 return instance._asdict()
211
212
213 class TablePreviewSerializer(serializers.Serializer):
214 name = serializers.CharField(required=False)
215 columns = SimpleColumnSerializer(many=True)
216
217
218 class RecordListParameterSerializer(serializers.Serializer):
219 filters = serializers.JSONField(required=False, default=[])
220 order_by = serializers.JSONField(required=False, default=[])
221 group_count_by = serializers.JSONField(required=False, default=[])
222
223
224 class TypeSerializer(serializers.Serializer):
225 identifier = serializers.CharField()
226 name = serializers.CharField()
227 db_types = serializers.ListField(child=serializers.CharField())
228
229
230 class DatabaseSerializer(serializers.ModelSerializer):
231 supported_types_url = serializers.SerializerMethodField()
232
233 class Meta:
234 model = Database
235 fields = ['id', 'name', 'deleted', 'supported_types_url']
236 read_only_fields = ['id', 'name', 'deleted', 'supported_types_url']
237
238 def get_supported_types_url(self, obj):
239 if isinstance(obj, Database):
240 # Only get records if we are serializing an existing table
241 request = self.context['request']
242 return request.build_absolute_uri(reverse('database-types', kwargs={'pk': obj.pk}))
243 else:
244 return None
245
246
247 class DataFileSerializer(serializers.ModelSerializer):
248 user = serializers.PrimaryKeyRelatedField(
249 default=serializers.CurrentUserDefault(), read_only=True
250 )
251 header = serializers.BooleanField(default=True)
252 paste = serializers.CharField(required=False, trim_whitespace=False)
253 url = serializers.URLField(required=False)
254
255 class Meta:
256 model = DataFile
257 fields = [
258 'id', 'file', 'table_imported_to', 'user', 'header', 'delimiter',
259 'escapechar', 'quotechar', 'paste', 'url', 'created_from'
260 ]
261 extra_kwargs = {
262 'file': {'required': False},
263 'delimiter': {'trim_whitespace': False},
264 'escapechar': {'trim_whitespace': False},
265 'quotechar': {'trim_whitespace': False}
266 }
267 # We only currently support importing to a new table, so setting a table via API is invalid.
268 # User should be set automatically, not submitted via the API.
269 read_only_fields = ['user', 'table_imported_to', 'created_from']
270 write_only_fields = ['paste', 'url']
271
272 def save(self, **kwargs):
273 """
274 Set user to current user while saving the data file.
275 """
276 current_user = self.fields['user'].get_default()
277 if current_user.is_authenticated:
278 kwargs['user'] = current_user
279 return super().save(**kwargs)
280
281 def validate(self, data):
282 if not self.partial:
283 # Only perform validation on source files when we're not partial
284 source_fields = ['file', 'paste', 'url']
285 present_fields = [field for field in source_fields if field in data]
286 if len(present_fields) > 1:
287 raise ValidationError(
288 f'Multiple source fields passed: {present_fields}.'
289 f' Only one of {source_fields} should be specified.'
290 )
291 elif len(present_fields) == 0:
292 raise ValidationError(
293 f'One of {source_fields} should be specified.'
294 )
295 return data
296
297 def validate_url(self, url):
298 try:
299 response = requests.head(url, allow_redirects=True)
300 except requests.exceptions.ConnectionError:
301 raise ValidationError('URL cannot be reached.')
302
303 content_type = response.headers.get('content-type')
304 if content_type not in SUPPORTED_URL_CONTENT_TYPES:
305 raise ValidationError(f"URL resource '{content_type}' not a valid type.")
306 return url
307
308
309 class ConstraintSerializer(serializers.ModelSerializer):
310 name = serializers.CharField(required=False)
311 type = serializers.CharField()
312 columns = serializers.ListField()
313
314 class Meta:
315 model = Constraint
316 fields = ['id', 'name', 'type', 'columns']
317
```
Path: `mathesar/filters.py`
Content:
```
1 from django_filters import BooleanFilter, DateTimeFromToRangeFilter
2 from django_property_filter import PropertyFilterSet, PropertyBaseInFilter, PropertyCharFilter
3
4 from mathesar.models import Schema, Table, Database
5
6
7 class CharInFilter(PropertyBaseInFilter, PropertyCharFilter):
8 pass
9
10
11 class SchemaFilter(PropertyFilterSet):
12 database = CharInFilter(field_name='database__name', lookup_expr='in')
13 name = CharInFilter(field_name='name', lookup_expr='in')
14
15 class Meta:
16 model = Schema
17 fields = ['name']
18
19
20 class TableFilter(PropertyFilterSet):
21 name = CharInFilter(field_name='name', lookup_expr='in')
22 created = DateTimeFromToRangeFilter(field_name='created_at')
23 updated = DateTimeFromToRangeFilter(field_name='updated_at')
24 not_imported = BooleanFilter(lookup_expr="isnull", field_name='import_verified')
25
26 class Meta:
27 model = Table
28 fields = ['name', 'schema', 'created_at', 'updated_at', 'import_verified']
29
30
31 class DatabaseFilter(PropertyFilterSet):
32 class Meta:
33 model = Database
34 fields = ['deleted']
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mathesar/filters.py b/mathesar/filters.py
--- a/mathesar/filters.py
+++ b/mathesar/filters.py
@@ -1,8 +1,25 @@
from django_filters import BooleanFilter, DateTimeFromToRangeFilter
from django_property_filter import PropertyFilterSet, PropertyBaseInFilter, PropertyCharFilter
+from mathesar.database.types import MathesarTypeIdentifier
from mathesar.models import Schema, Table, Database
+FILTER_OPTIONS_BY_TYPE_IDENTIFIER = {
+ MathesarTypeIdentifier.BOOLEAN.value:
+ {
+ "db_type": "BOOLEAN",
+ "options": [{
+ "op": "eq",
+ "value": {
+ "allowed_types": ["BOOLEAN"],
+ }
+ }, {
+ "op": "is_null",
+ "value": "null",
+ }]
+ }
+}
+
class CharInFilter(PropertyBaseInFilter, PropertyCharFilter):
pass
diff --git a/mathesar/serializers.py b/mathesar/serializers.py
--- a/mathesar/serializers.py
+++ b/mathesar/serializers.py
@@ -6,6 +6,7 @@
from rest_framework.fields import empty
from rest_framework.settings import api_settings
+from mathesar.filters import FILTER_OPTIONS_BY_TYPE_IDENTIFIER
from mathesar.models import Table, Schema, DataFile, Database, Constraint
@@ -225,6 +226,10 @@
identifier = serializers.CharField()
name = serializers.CharField()
db_types = serializers.ListField(child=serializers.CharField())
+ filters = serializers.SerializerMethodField()
+
+ def get_filters(self, obj):
+ return FILTER_OPTIONS_BY_TYPE_IDENTIFIER.get(obj.get('identifier'))
class DatabaseSerializer(serializers.ModelSerializer):
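With the patch applied, the Boolean entry served by `/api/v0/databases/<id>/types/` would carry the new `filters` payload next to the existing fields. A sketch of one element of that response is shown below; the `identifier` and `name` strings are educated guesses from `MathesarTypeIdentifier.BOOLEAN`, while the `filters` value is copied from `FILTER_OPTIONS_BY_TYPE_IDENTIFIER` in the diff.

```python
# Illustrative serialized entry for the Boolean type after the patch.
boolean_type_entry = {
    "identifier": "boolean",   # assumed value of MathesarTypeIdentifier.BOOLEAN.value
    "name": "Boolean",         # display name; exact string depends on the types module
    "db_types": ["BOOLEAN"],
    "filters": {
        "db_type": "BOOLEAN",
        "options": [
            {"op": "eq", "value": {"allowed_types": ["BOOLEAN"]}},  # needs 1 parameter
            {"op": "is_null", "value": "null"},                     # needs 0 parameters
        ],
    },
}
```

A test for the issue's checklist could assert that this `filters` key is present, and that it is null only for type identifiers missing from `FILTER_OPTIONS_BY_TYPE_IDENTIFIER`, since `get_filters` falls back to `None` via `dict.get`.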
|
{"golden_diff": "diff --git a/mathesar/filters.py b/mathesar/filters.py\n--- a/mathesar/filters.py\n+++ b/mathesar/filters.py\n@@ -1,8 +1,25 @@\n from django_filters import BooleanFilter, DateTimeFromToRangeFilter\n from django_property_filter import PropertyFilterSet, PropertyBaseInFilter, PropertyCharFilter\n \n+from mathesar.database.types import MathesarTypeIdentifier\n from mathesar.models import Schema, Table, Database\n \n+FILTER_OPTIONS_BY_TYPE_IDENTIFIER = {\n+ MathesarTypeIdentifier.BOOLEAN.value:\n+ {\n+ \"db_type\": \"BOOLEAN\",\n+ \"options\": [{\n+ \"op\": \"eq\",\n+ \"value\": {\n+ \"allowed_types\": [\"BOOLEAN\"],\n+ }\n+ }, {\n+ \"op\": \"is_null\",\n+ \"value\": \"null\",\n+ }]\n+ }\n+}\n+\n \n class CharInFilter(PropertyBaseInFilter, PropertyCharFilter):\n pass\ndiff --git a/mathesar/serializers.py b/mathesar/serializers.py\n--- a/mathesar/serializers.py\n+++ b/mathesar/serializers.py\n@@ -6,6 +6,7 @@\n from rest_framework.fields import empty\n from rest_framework.settings import api_settings\n \n+from mathesar.filters import FILTER_OPTIONS_BY_TYPE_IDENTIFIER\n from mathesar.models import Table, Schema, DataFile, Database, Constraint\n \n \n@@ -225,6 +226,10 @@\n identifier = serializers.CharField()\n name = serializers.CharField()\n db_types = serializers.ListField(child=serializers.CharField())\n+ filters = serializers.SerializerMethodField()\n+\n+ def get_filters(self, obj):\n+ return FILTER_OPTIONS_BY_TYPE_IDENTIFIER.get(obj.get('identifier'))\n \n \n class DatabaseSerializer(serializers.ModelSerializer):\n", "issue": "Implement filtering options for Boolean types\n## Problem\r\nWe need to ensure that records that include columns of Boolean type support the following filters via API: \r\n- is {true/false}\r\n- is not {true/false}\r\n- is empty\r\n- is not empty\r\n\r\nThis may already be done, please verify that it works and add some test cases specifically for boolean data types.\r\n\r\nThis involves:\r\n- Implementing the filters in the backend\r\n- Updating the `/api/v0/databases/<id>/types/` endpoint to store available filters on this type\r\n - Filter information should include the number of parameters needing to be passed in (e.g. `is` needs 1 parameter, `is empty` needs 0)\r\n\r\n## Additional context\r\n- We're using our fork of `sqlalchemy-filters` to provide filtering. See: https://github.com/centerofci/sqlalchemy-filters.\r\n- See also: #387\r\n- #557 provides some context on storing filters.\n", "before_files": [{"content": "import requests\n\nfrom django.urls import reverse\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import empty\nfrom rest_framework.settings import api_settings\n\nfrom mathesar.models import Table, Schema, DataFile, Database, Constraint\n\n\nSUPPORTED_URL_CONTENT_TYPES = {'text/csv', 'text/plain'}\n\n\nclass ModelNameField(serializers.CharField):\n \"\"\"\n De-serializes the request field as a string, but serializes the response field as\n `model.name`. Required to support passing and returing a model name from the\n endpoint, while also storing the model as a related field.\n \"\"\"\n def to_representation(self, value):\n return value.name\n\n\nclass InputValueField(serializers.CharField):\n \"\"\"\n Takes in an arbitrary value. 
Use to emulate our column and record creation and\n update endpoints, which handle arbitrary data pulled from request.data\n \"\"\"\n def to_internal_value(self, data):\n return data\n\n def to_representation(self, value):\n return value\n\n\nclass NestedTableSerializer(serializers.HyperlinkedModelSerializer):\n url = serializers.SerializerMethodField()\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'url']\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-detail', kwargs={'pk': obj.pk}))\n\n\nclass SchemaSerializer(serializers.HyperlinkedModelSerializer):\n name = serializers.CharField()\n database = ModelNameField(max_length=128)\n\n class Meta:\n model = Schema\n fields = ['id', 'name', 'database', 'has_dependencies']\n\n\nclass TypeOptionSerializer(serializers.Serializer):\n precision = serializers.IntegerField(required=False)\n scale = serializers.IntegerField(required=False)\n\n def run_validation(self, data=empty):\n # Ensure that there are no unknown type options passed in.\n if data is not empty:\n unknown = set(data) - set(self.fields)\n if unknown:\n errors = ['Unknown field: {}'.format(field) for field in unknown]\n raise serializers.ValidationError({\n api_settings.NON_FIELD_ERRORS_KEY: errors,\n })\n\n return super(TypeOptionSerializer, self).run_validation(data)\n\n\nclass SimpleColumnSerializer(serializers.Serializer):\n name = serializers.CharField()\n type = serializers.CharField(source='plain_type')\n type_options = TypeOptionSerializer(required=False, allow_null=True)\n\n\nclass ColumnSerializer(SimpleColumnSerializer):\n name = serializers.CharField(required=False)\n\n # From scratch fields\n type = serializers.CharField(source='plain_type', required=False)\n nullable = serializers.BooleanField(default=True)\n primary_key = serializers.BooleanField(default=False)\n\n # From duplication fields\n source_column = serializers.IntegerField(required=False, write_only=True)\n copy_source_data = serializers.BooleanField(default=True, write_only=True)\n copy_source_constraints = serializers.BooleanField(default=True, write_only=True)\n\n # Read only fields\n index = serializers.IntegerField(source='column_index', read_only=True)\n valid_target_types = serializers.ListField(read_only=True)\n default = InputValueField(\n source='default_value', read_only=False, default=None, allow_null=True\n )\n\n def validate(self, data):\n if not self.partial:\n from_scratch_required_fields = ['name', 'type']\n from_scratch_specific_fields = ['type', 'nullable', 'primary_key']\n from_dupe_required_fields = ['source_column']\n from_dupe_specific_fields = ['source_column', 'copy_source_data',\n 'copy_source_constraints']\n\n # Note that we run validation on self.initial_data, as `data` has defaults\n # filled in for fields that weren't specified by the request\n from_scratch_required_all = all([\n f in self.initial_data for f in from_scratch_required_fields\n ])\n from_scratch_specific_in = [\n f for f in from_scratch_specific_fields if f in self.initial_data\n ]\n from_dupe_required_all = all([\n f in self.initial_data for f in from_dupe_required_fields\n ])\n from_dupe_specific_in = [\n f for f in from_dupe_specific_fields if f in self.initial_data\n ]\n\n if len(from_dupe_specific_in) and len(from_scratch_specific_in):\n raise ValidationError(\n f'{from_scratch_specific_in} cannot be passed in if '\n f'{from_dupe_specific_in} has also been passed in.'\n )\n elif not from_dupe_required_all and not 
from_scratch_required_all:\n # We default to from scratch required fields if no fields are passed\n if len(from_dupe_specific_in) and not len(from_scratch_specific_in):\n required_fields = from_dupe_required_fields\n else:\n required_fields = from_scratch_required_fields\n raise ValidationError({\n f: ['This field is required.']\n for f in required_fields\n if f not in self.initial_data\n })\n return data\n\n\nclass TableSerializer(serializers.ModelSerializer):\n columns = SimpleColumnSerializer(many=True, source='sa_columns', required=False)\n records_url = serializers.SerializerMethodField()\n constraints_url = serializers.SerializerMethodField()\n columns_url = serializers.SerializerMethodField()\n type_suggestions_url = serializers.SerializerMethodField()\n previews_url = serializers.SerializerMethodField()\n name = serializers.CharField(required=False, allow_blank=True, default='')\n data_files = serializers.PrimaryKeyRelatedField(\n required=False, many=True, queryset=DataFile.objects.all()\n )\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'schema', 'created_at', 'updated_at', 'import_verified',\n 'columns', 'records_url', 'constraints_url', 'columns_url',\n 'type_suggestions_url', 'previews_url', 'data_files',\n 'has_dependencies']\n\n def get_records_url(self, obj):\n if isinstance(obj, Table):\n # Only get records if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_constraints_url(self, obj):\n if isinstance(obj, Table):\n # Only get constraints if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-constraint-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_columns_url(self, obj):\n if isinstance(obj, Table):\n # Only get columns if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-column-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_type_suggestions_url(self, obj):\n if isinstance(obj, Table):\n # Only get type suggestions if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-type-suggestions', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def get_previews_url(self, obj):\n if isinstance(obj, Table):\n # Only get previews if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-previews', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def validate_data_files(self, data_files):\n if data_files and len(data_files) > 1:\n raise ValidationError('Multiple data files are unsupported.')\n return data_files\n\n\nclass RecordSerializer(serializers.BaseSerializer):\n def to_representation(self, instance):\n return instance._asdict()\n\n\nclass TablePreviewSerializer(serializers.Serializer):\n name = serializers.CharField(required=False)\n columns = SimpleColumnSerializer(many=True)\n\n\nclass RecordListParameterSerializer(serializers.Serializer):\n filters = serializers.JSONField(required=False, default=[])\n order_by = serializers.JSONField(required=False, default=[])\n group_count_by = serializers.JSONField(required=False, default=[])\n\n\nclass TypeSerializer(serializers.Serializer):\n identifier = serializers.CharField()\n name = serializers.CharField()\n db_types = 
serializers.ListField(child=serializers.CharField())\n\n\nclass DatabaseSerializer(serializers.ModelSerializer):\n supported_types_url = serializers.SerializerMethodField()\n\n class Meta:\n model = Database\n fields = ['id', 'name', 'deleted', 'supported_types_url']\n read_only_fields = ['id', 'name', 'deleted', 'supported_types_url']\n\n def get_supported_types_url(self, obj):\n if isinstance(obj, Database):\n # Only get records if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('database-types', kwargs={'pk': obj.pk}))\n else:\n return None\n\n\nclass DataFileSerializer(serializers.ModelSerializer):\n user = serializers.PrimaryKeyRelatedField(\n default=serializers.CurrentUserDefault(), read_only=True\n )\n header = serializers.BooleanField(default=True)\n paste = serializers.CharField(required=False, trim_whitespace=False)\n url = serializers.URLField(required=False)\n\n class Meta:\n model = DataFile\n fields = [\n 'id', 'file', 'table_imported_to', 'user', 'header', 'delimiter',\n 'escapechar', 'quotechar', 'paste', 'url', 'created_from'\n ]\n extra_kwargs = {\n 'file': {'required': False},\n 'delimiter': {'trim_whitespace': False},\n 'escapechar': {'trim_whitespace': False},\n 'quotechar': {'trim_whitespace': False}\n }\n # We only currently support importing to a new table, so setting a table via API is invalid.\n # User should be set automatically, not submitted via the API.\n read_only_fields = ['user', 'table_imported_to', 'created_from']\n write_only_fields = ['paste', 'url']\n\n def save(self, **kwargs):\n \"\"\"\n Set user to current user while saving the data file.\n \"\"\"\n current_user = self.fields['user'].get_default()\n if current_user.is_authenticated:\n kwargs['user'] = current_user\n return super().save(**kwargs)\n\n def validate(self, data):\n if not self.partial:\n # Only perform validation on source files when we're not partial\n source_fields = ['file', 'paste', 'url']\n present_fields = [field for field in source_fields if field in data]\n if len(present_fields) > 1:\n raise ValidationError(\n f'Multiple source fields passed: {present_fields}.'\n f' Only one of {source_fields} should be specified.'\n )\n elif len(present_fields) == 0:\n raise ValidationError(\n f'One of {source_fields} should be specified.'\n )\n return data\n\n def validate_url(self, url):\n try:\n response = requests.head(url, allow_redirects=True)\n except requests.exceptions.ConnectionError:\n raise ValidationError('URL cannot be reached.')\n\n content_type = response.headers.get('content-type')\n if content_type not in SUPPORTED_URL_CONTENT_TYPES:\n raise ValidationError(f\"URL resource '{content_type}' not a valid type.\")\n return url\n\n\nclass ConstraintSerializer(serializers.ModelSerializer):\n name = serializers.CharField(required=False)\n type = serializers.CharField()\n columns = serializers.ListField()\n\n class Meta:\n model = Constraint\n fields = ['id', 'name', 'type', 'columns']\n", "path": "mathesar/serializers.py"}, {"content": "from django_filters import BooleanFilter, DateTimeFromToRangeFilter\nfrom django_property_filter import PropertyFilterSet, PropertyBaseInFilter, PropertyCharFilter\n\nfrom mathesar.models import Schema, Table, Database\n\n\nclass CharInFilter(PropertyBaseInFilter, PropertyCharFilter):\n pass\n\n\nclass SchemaFilter(PropertyFilterSet):\n database = CharInFilter(field_name='database__name', lookup_expr='in')\n name = CharInFilter(field_name='name', lookup_expr='in')\n\n class Meta:\n 
model = Schema\n fields = ['name']\n\n\nclass TableFilter(PropertyFilterSet):\n name = CharInFilter(field_name='name', lookup_expr='in')\n created = DateTimeFromToRangeFilter(field_name='created_at')\n updated = DateTimeFromToRangeFilter(field_name='updated_at')\n not_imported = BooleanFilter(lookup_expr=\"isnull\", field_name='import_verified')\n\n class Meta:\n model = Table\n fields = ['name', 'schema', 'created_at', 'updated_at', 'import_verified']\n\n\nclass DatabaseFilter(PropertyFilterSet):\n class Meta:\n model = Database\n fields = ['deleted']\n", "path": "mathesar/filters.py"}], "after_files": [{"content": "import requests\n\nfrom django.urls import reverse\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import empty\nfrom rest_framework.settings import api_settings\n\nfrom mathesar.filters import FILTER_OPTIONS_BY_TYPE_IDENTIFIER\nfrom mathesar.models import Table, Schema, DataFile, Database, Constraint\n\n\nSUPPORTED_URL_CONTENT_TYPES = {'text/csv', 'text/plain'}\n\n\nclass ModelNameField(serializers.CharField):\n \"\"\"\n De-serializes the request field as a string, but serializes the response field as\n `model.name`. Required to support passing and returing a model name from the\n endpoint, while also storing the model as a related field.\n \"\"\"\n def to_representation(self, value):\n return value.name\n\n\nclass InputValueField(serializers.CharField):\n \"\"\"\n Takes in an arbitrary value. Use to emulate our column and record creation and\n update endpoints, which handle arbitrary data pulled from request.data\n \"\"\"\n def to_internal_value(self, data):\n return data\n\n def to_representation(self, value):\n return value\n\n\nclass NestedTableSerializer(serializers.HyperlinkedModelSerializer):\n url = serializers.SerializerMethodField()\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'url']\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-detail', kwargs={'pk': obj.pk}))\n\n\nclass SchemaSerializer(serializers.HyperlinkedModelSerializer):\n name = serializers.CharField()\n database = ModelNameField(max_length=128)\n\n class Meta:\n model = Schema\n fields = ['id', 'name', 'database', 'has_dependencies']\n\n\nclass TypeOptionSerializer(serializers.Serializer):\n precision = serializers.IntegerField(required=False)\n scale = serializers.IntegerField(required=False)\n\n def run_validation(self, data=empty):\n # Ensure that there are no unknown type options passed in.\n if data is not empty:\n unknown = set(data) - set(self.fields)\n if unknown:\n errors = ['Unknown field: {}'.format(field) for field in unknown]\n raise serializers.ValidationError({\n api_settings.NON_FIELD_ERRORS_KEY: errors,\n })\n\n return super(TypeOptionSerializer, self).run_validation(data)\n\n\nclass SimpleColumnSerializer(serializers.Serializer):\n name = serializers.CharField()\n type = serializers.CharField(source='plain_type')\n type_options = TypeOptionSerializer(required=False, allow_null=True)\n\n\nclass ColumnSerializer(SimpleColumnSerializer):\n name = serializers.CharField(required=False)\n\n # From scratch fields\n type = serializers.CharField(source='plain_type', required=False)\n nullable = serializers.BooleanField(default=True)\n primary_key = serializers.BooleanField(default=False)\n\n # From duplication fields\n source_column = serializers.IntegerField(required=False, write_only=True)\n copy_source_data = 
serializers.BooleanField(default=True, write_only=True)\n copy_source_constraints = serializers.BooleanField(default=True, write_only=True)\n\n # Read only fields\n index = serializers.IntegerField(source='column_index', read_only=True)\n valid_target_types = serializers.ListField(read_only=True)\n default = InputValueField(\n source='default_value', read_only=False, default=None, allow_null=True\n )\n\n def validate(self, data):\n if not self.partial:\n from_scratch_required_fields = ['name', 'type']\n from_scratch_specific_fields = ['type', 'nullable', 'primary_key']\n from_dupe_required_fields = ['source_column']\n from_dupe_specific_fields = ['source_column', 'copy_source_data',\n 'copy_source_constraints']\n\n # Note that we run validation on self.initial_data, as `data` has defaults\n # filled in for fields that weren't specified by the request\n from_scratch_required_all = all([\n f in self.initial_data for f in from_scratch_required_fields\n ])\n from_scratch_specific_in = [\n f for f in from_scratch_specific_fields if f in self.initial_data\n ]\n from_dupe_required_all = all([\n f in self.initial_data for f in from_dupe_required_fields\n ])\n from_dupe_specific_in = [\n f for f in from_dupe_specific_fields if f in self.initial_data\n ]\n\n if len(from_dupe_specific_in) and len(from_scratch_specific_in):\n raise ValidationError(\n f'{from_scratch_specific_in} cannot be passed in if '\n f'{from_dupe_specific_in} has also been passed in.'\n )\n elif not from_dupe_required_all and not from_scratch_required_all:\n # We default to from scratch required fields if no fields are passed\n if len(from_dupe_specific_in) and not len(from_scratch_specific_in):\n required_fields = from_dupe_required_fields\n else:\n required_fields = from_scratch_required_fields\n raise ValidationError({\n f: ['This field is required.']\n for f in required_fields\n if f not in self.initial_data\n })\n return data\n\n\nclass TableSerializer(serializers.ModelSerializer):\n columns = SimpleColumnSerializer(many=True, source='sa_columns', required=False)\n records_url = serializers.SerializerMethodField()\n constraints_url = serializers.SerializerMethodField()\n columns_url = serializers.SerializerMethodField()\n type_suggestions_url = serializers.SerializerMethodField()\n previews_url = serializers.SerializerMethodField()\n name = serializers.CharField(required=False, allow_blank=True, default='')\n data_files = serializers.PrimaryKeyRelatedField(\n required=False, many=True, queryset=DataFile.objects.all()\n )\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'schema', 'created_at', 'updated_at', 'import_verified',\n 'columns', 'records_url', 'constraints_url', 'columns_url',\n 'type_suggestions_url', 'previews_url', 'data_files',\n 'has_dependencies']\n\n def get_records_url(self, obj):\n if isinstance(obj, Table):\n # Only get records if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_constraints_url(self, obj):\n if isinstance(obj, Table):\n # Only get constraints if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-constraint-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_columns_url(self, obj):\n if isinstance(obj, Table):\n # Only get columns if we are serializing an existing table\n request = self.context['request']\n return 
request.build_absolute_uri(reverse('table-column-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_type_suggestions_url(self, obj):\n if isinstance(obj, Table):\n # Only get type suggestions if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-type-suggestions', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def get_previews_url(self, obj):\n if isinstance(obj, Table):\n # Only get previews if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-previews', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def validate_data_files(self, data_files):\n if data_files and len(data_files) > 1:\n raise ValidationError('Multiple data files are unsupported.')\n return data_files\n\n\nclass RecordSerializer(serializers.BaseSerializer):\n def to_representation(self, instance):\n return instance._asdict()\n\n\nclass TablePreviewSerializer(serializers.Serializer):\n name = serializers.CharField(required=False)\n columns = SimpleColumnSerializer(many=True)\n\n\nclass RecordListParameterSerializer(serializers.Serializer):\n filters = serializers.JSONField(required=False, default=[])\n order_by = serializers.JSONField(required=False, default=[])\n group_count_by = serializers.JSONField(required=False, default=[])\n\n\nclass TypeSerializer(serializers.Serializer):\n identifier = serializers.CharField()\n name = serializers.CharField()\n db_types = serializers.ListField(child=serializers.CharField())\n filters = serializers.SerializerMethodField()\n\n def get_filters(self, obj):\n return FILTER_OPTIONS_BY_TYPE_IDENTIFIER.get(obj.get('identifier'))\n\n\nclass DatabaseSerializer(serializers.ModelSerializer):\n supported_types_url = serializers.SerializerMethodField()\n\n class Meta:\n model = Database\n fields = ['id', 'name', 'deleted', 'supported_types_url']\n read_only_fields = ['id', 'name', 'deleted', 'supported_types_url']\n\n def get_supported_types_url(self, obj):\n if isinstance(obj, Database):\n # Only get records if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('database-types', kwargs={'pk': obj.pk}))\n else:\n return None\n\n\nclass DataFileSerializer(serializers.ModelSerializer):\n user = serializers.PrimaryKeyRelatedField(\n default=serializers.CurrentUserDefault(), read_only=True\n )\n header = serializers.BooleanField(default=True)\n paste = serializers.CharField(required=False, trim_whitespace=False)\n url = serializers.URLField(required=False)\n\n class Meta:\n model = DataFile\n fields = [\n 'id', 'file', 'table_imported_to', 'user', 'header', 'delimiter',\n 'escapechar', 'quotechar', 'paste', 'url', 'created_from'\n ]\n extra_kwargs = {\n 'file': {'required': False},\n 'delimiter': {'trim_whitespace': False},\n 'escapechar': {'trim_whitespace': False},\n 'quotechar': {'trim_whitespace': False}\n }\n # We only currently support importing to a new table, so setting a table via API is invalid.\n # User should be set automatically, not submitted via the API.\n read_only_fields = ['user', 'table_imported_to', 'created_from']\n write_only_fields = ['paste', 'url']\n\n def save(self, **kwargs):\n \"\"\"\n Set user to current user while saving the data file.\n \"\"\"\n current_user = self.fields['user'].get_default()\n if current_user.is_authenticated:\n kwargs['user'] = current_user\n return super().save(**kwargs)\n\n def validate(self, data):\n if not 
self.partial:\n # Only perform validation on source files when we're not partial\n source_fields = ['file', 'paste', 'url']\n present_fields = [field for field in source_fields if field in data]\n if len(present_fields) > 1:\n raise ValidationError(\n f'Multiple source fields passed: {present_fields}.'\n f' Only one of {source_fields} should be specified.'\n )\n elif len(present_fields) == 0:\n raise ValidationError(\n f'One of {source_fields} should be specified.'\n )\n return data\n\n def validate_url(self, url):\n try:\n response = requests.head(url, allow_redirects=True)\n except requests.exceptions.ConnectionError:\n raise ValidationError('URL cannot be reached.')\n\n content_type = response.headers.get('content-type')\n if content_type not in SUPPORTED_URL_CONTENT_TYPES:\n raise ValidationError(f\"URL resource '{content_type}' not a valid type.\")\n return url\n\n\nclass ConstraintSerializer(serializers.ModelSerializer):\n name = serializers.CharField(required=False)\n type = serializers.CharField()\n columns = serializers.ListField()\n\n class Meta:\n model = Constraint\n fields = ['id', 'name', 'type', 'columns']\n", "path": "mathesar/serializers.py"}, {"content": "from django_filters import BooleanFilter, DateTimeFromToRangeFilter\nfrom django_property_filter import PropertyFilterSet, PropertyBaseInFilter, PropertyCharFilter\n\nfrom mathesar.database.types import MathesarTypeIdentifier\nfrom mathesar.models import Schema, Table, Database\n\nFILTER_OPTIONS_BY_TYPE_IDENTIFIER = {\n MathesarTypeIdentifier.BOOLEAN.value:\n {\n \"db_type\": \"BOOLEAN\",\n \"options\": [{\n \"op\": \"eq\",\n \"value\": {\n \"allowed_types\": [\"BOOLEAN\"],\n }\n }, {\n \"op\": \"is_null\",\n \"value\": \"null\",\n }]\n }\n}\n\n\nclass CharInFilter(PropertyBaseInFilter, PropertyCharFilter):\n pass\n\n\nclass SchemaFilter(PropertyFilterSet):\n database = CharInFilter(field_name='database__name', lookup_expr='in')\n name = CharInFilter(field_name='name', lookup_expr='in')\n\n class Meta:\n model = Schema\n fields = ['name']\n\n\nclass TableFilter(PropertyFilterSet):\n name = CharInFilter(field_name='name', lookup_expr='in')\n created = DateTimeFromToRangeFilter(field_name='created_at')\n updated = DateTimeFromToRangeFilter(field_name='updated_at')\n not_imported = BooleanFilter(lookup_expr=\"isnull\", field_name='import_verified')\n\n class Meta:\n model = Table\n fields = ['name', 'schema', 'created_at', 'updated_at', 'import_verified']\n\n\nclass DatabaseFilter(PropertyFilterSet):\n class Meta:\n model = Database\n fields = ['deleted']\n", "path": "mathesar/filters.py"}]}
| 4,064 | 376 |
gh_patches_debug_6582
|
rasdani/github-patches
|
git_diff
|
ytdl-org__youtube-dl-6539
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Handle protocol-relative URLs
Trying to download http://www.funnyordie.com/videos/ea20db28f8/kristen-stewart-jesse-eisenberg-interview-each-other (warning: autostarting video) fails with:
```
[debug] System config: []
[debug] User config: []
[debug] Command-line args: [u'-v', u'http://www.funnyordie.com/videos/ea20db28f8/kristen-stewart-jesse-eisenberg-interview-each-other']
[debug] Encodings: locale utf-8, fs utf-8, out utf-8, pref utf-8
[debug] youtube-dl version 2015.07.28
[debug] Python version 2.7.10 - Darwin-13.4.0-x86_64-i386-64bit
[debug] exe versions: ffmpeg 2.7.2, ffprobe 2.7.2, rtmpdump 2.4
[debug] Proxy map: {}
[FunnyOrDie] ea20db28f8: Downloading webpage
[debug] Invoking downloader on u'//vo.fod4.com/v/ea20db28f8/v2500.mp4'
Traceback (most recent call last):
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py", line 162, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/opt/local/bin/youtube-dl/__main__.py", line 19, in <module>
File "/opt/local/bin/youtube-dl/youtube_dl/__init__.py", line 410, in main
File "/opt/local/bin/youtube-dl/youtube_dl/__init__.py", line 400, in _real_main
File "/opt/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 1504, in download
File "/opt/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 667, in extract_info
File "/opt/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 713, in process_ie_result
File "/opt/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 1174, in process_video_result
File "/opt/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 1436, in process_info
File "/opt/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 1378, in dl
File "/opt/local/bin/youtube-dl/youtube_dl/downloader/common.py", line 342, in download
File "/opt/local/bin/youtube-dl/youtube_dl/downloader/http.py", line 59, in real_download
File "/opt/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 1732, in urlopen
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 423, in open
protocol = req.get_type()
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py", line 285, in get_type
raise ValueError, "unknown url type: %s" % self.__original
ValueError: unknown url type: //vo.fod4.com/v/ea20db28f8/v2500.mp4
```
Plugging the URL into wget (with a scheme) correctly downloads the video, so youtube-dl finds it just fine; it just needs to resolve the protocol-relative URL by re-using the protocol of the source page.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `youtube_dl/extractor/funnyordie.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import json
4 import re
5
6 from .common import InfoExtractor
7 from ..utils import ExtractorError
8
9
10 class FunnyOrDieIE(InfoExtractor):
11 _VALID_URL = r'https?://(?:www\.)?funnyordie\.com/(?P<type>embed|articles|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'
12 _TESTS = [{
13 'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',
14 'md5': 'bcd81e0c4f26189ee09be362ad6e6ba9',
15 'info_dict': {
16 'id': '0732f586d7',
17 'ext': 'mp4',
18 'title': 'Heart-Shaped Box: Literal Video Version',
19 'description': 'md5:ea09a01bc9a1c46d9ab696c01747c338',
20 'thumbnail': 're:^http:.*\.jpg$',
21 },
22 }, {
23 'url': 'http://www.funnyordie.com/embed/e402820827',
24 'info_dict': {
25 'id': 'e402820827',
26 'ext': 'mp4',
27 'title': 'Please Use This Song (Jon Lajoie)',
28 'description': 'Please use this to sell something. www.jonlajoie.com',
29 'thumbnail': 're:^http:.*\.jpg$',
30 },
31 }, {
32 'url': 'http://www.funnyordie.com/articles/ebf5e34fc8/10-hours-of-walking-in-nyc-as-a-man',
33 'only_matching': True,
34 }]
35
36 def _real_extract(self, url):
37 mobj = re.match(self._VALID_URL, url)
38
39 video_id = mobj.group('id')
40 webpage = self._download_webpage(url, video_id)
41
42 links = re.findall(r'<source src="([^"]+/v)[^"]+\.([^"]+)" type=\'video', webpage)
43 if not links:
44 raise ExtractorError('No media links available for %s' % video_id)
45
46 links.sort(key=lambda link: 1 if link[1] == 'mp4' else 0)
47
48 bitrates = self._html_search_regex(r'<source src="[^"]+/v,((?:\d+,)+)\.mp4\.csmil', webpage, 'video bitrates')
49 bitrates = [int(b) for b in bitrates.rstrip(',').split(',')]
50 bitrates.sort()
51
52 formats = []
53 for bitrate in bitrates:
54 for link in links:
55 formats.append({
56 'url': '%s%d.%s' % (link[0], bitrate, link[1]),
57 'format_id': '%s-%d' % (link[1], bitrate),
58 'vbr': bitrate,
59 })
60
61 subtitles = {}
62 for src, src_lang in re.findall(r'<track kind="captions" src="([^"]+)" srclang="([^"]+)"', webpage):
63 subtitles[src_lang] = [{
64 'ext': src.split('/')[-1],
65 'url': 'http://www.funnyordie.com%s' % src,
66 }]
67
68 post_json = self._search_regex(
69 r'fb_post\s*=\s*(\{.*?\});', webpage, 'post details')
70 post = json.loads(post_json)
71
72 return {
73 'id': video_id,
74 'title': post['name'],
75 'description': post.get('description'),
76 'thumbnail': post.get('picture'),
77 'formats': formats,
78 'subtitles': subtitles,
79 }
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/youtube_dl/extractor/funnyordie.py b/youtube_dl/extractor/funnyordie.py
--- a/youtube_dl/extractor/funnyordie.py
+++ b/youtube_dl/extractor/funnyordie.py
@@ -53,7 +53,7 @@
for bitrate in bitrates:
for link in links:
formats.append({
- 'url': '%s%d.%s' % (link[0], bitrate, link[1]),
+ 'url': self._proto_relative_url('%s%d.%s' % (link[0], bitrate, link[1])),
'format_id': '%s-%d' % (link[1], bitrate),
'vbr': bitrate,
})
|
{"golden_diff": "diff --git a/youtube_dl/extractor/funnyordie.py b/youtube_dl/extractor/funnyordie.py\n--- a/youtube_dl/extractor/funnyordie.py\n+++ b/youtube_dl/extractor/funnyordie.py\n@@ -53,7 +53,7 @@\n for bitrate in bitrates:\n for link in links:\n formats.append({\n- 'url': '%s%d.%s' % (link[0], bitrate, link[1]),\n+ 'url': self._proto_relative_url('%s%d.%s' % (link[0], bitrate, link[1])),\n 'format_id': '%s-%d' % (link[1], bitrate),\n 'vbr': bitrate,\n })\n", "issue": "Handle protocol-relative URLs\nTrying to download http://www.funnyordie.com/videos/ea20db28f8/kristen-stewart-jesse-eisenberg-interview-each-other (warning: autostarting video) fails with:\n\n```\n[debug] System config: []\n[debug] User config: []\n[debug] Command-line args: [u'-v', u'http://www.funnyordie.com/videos/ea20db28f8/kristen-stewart-jesse-eisenberg-interview-each-other']\n[debug] Encodings: locale utf-8, fs utf-8, out utf-8, pref utf-8\n[debug] youtube-dl version 2015.07.28\n[debug] Python version 2.7.10 - Darwin-13.4.0-x86_64-i386-64bit\n[debug] exe versions: ffmpeg 2.7.2, ffprobe 2.7.2, rtmpdump 2.4\n[debug] Proxy map: {}\n[FunnyOrDie] ea20db28f8: Downloading webpage\n[debug] Invoking downloader on u'//vo.fod4.com/v/ea20db28f8/v2500.mp4'\nTraceback (most recent call last):\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py\", line 162, in _run_module_as_main\n \"__main__\", fname, loader, pkg_name)\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py\", line 72, in _run_code\n exec code in run_globals\n File \"/opt/local/bin/youtube-dl/__main__.py\", line 19, in <module>\n File \"/opt/local/bin/youtube-dl/youtube_dl/__init__.py\", line 410, in main\n File \"/opt/local/bin/youtube-dl/youtube_dl/__init__.py\", line 400, in _real_main\n File \"/opt/local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 1504, in download\n File \"/opt/local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 667, in extract_info\n File \"/opt/local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 713, in process_ie_result\n File \"/opt/local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 1174, in process_video_result\n File \"/opt/local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 1436, in process_info\n File \"/opt/local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 1378, in dl\n File \"/opt/local/bin/youtube-dl/youtube_dl/downloader/common.py\", line 342, in download\n File \"/opt/local/bin/youtube-dl/youtube_dl/downloader/http.py\", line 59, in real_download\n File \"/opt/local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 1732, in urlopen\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py\", line 423, in open\n protocol = req.get_type()\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/urllib2.py\", line 285, in get_type\n raise ValueError, \"unknown url type: %s\" % self.__original\nValueError: unknown url type: //vo.fod4.com/v/ea20db28f8/v2500.mp4\n```\n\nPlugging the url into wget (with a scheme) correctly downloads the video so youtube-dl finds it just fine, it just needs to resolve the protocol-relative URL by re-using the protocol of the source page.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport json\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import ExtractorError\n\n\nclass FunnyOrDieIE(InfoExtractor):\n _VALID_URL = 
r'https?://(?:www\\.)?funnyordie\\.com/(?P<type>embed|articles|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'\n _TESTS = [{\n 'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',\n 'md5': 'bcd81e0c4f26189ee09be362ad6e6ba9',\n 'info_dict': {\n 'id': '0732f586d7',\n 'ext': 'mp4',\n 'title': 'Heart-Shaped Box: Literal Video Version',\n 'description': 'md5:ea09a01bc9a1c46d9ab696c01747c338',\n 'thumbnail': 're:^http:.*\\.jpg$',\n },\n }, {\n 'url': 'http://www.funnyordie.com/embed/e402820827',\n 'info_dict': {\n 'id': 'e402820827',\n 'ext': 'mp4',\n 'title': 'Please Use This Song (Jon Lajoie)',\n 'description': 'Please use this to sell something. www.jonlajoie.com',\n 'thumbnail': 're:^http:.*\\.jpg$',\n },\n }, {\n 'url': 'http://www.funnyordie.com/articles/ebf5e34fc8/10-hours-of-walking-in-nyc-as-a-man',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n\n video_id = mobj.group('id')\n webpage = self._download_webpage(url, video_id)\n\n links = re.findall(r'<source src=\"([^\"]+/v)[^\"]+\\.([^\"]+)\" type=\\'video', webpage)\n if not links:\n raise ExtractorError('No media links available for %s' % video_id)\n\n links.sort(key=lambda link: 1 if link[1] == 'mp4' else 0)\n\n bitrates = self._html_search_regex(r'<source src=\"[^\"]+/v,((?:\\d+,)+)\\.mp4\\.csmil', webpage, 'video bitrates')\n bitrates = [int(b) for b in bitrates.rstrip(',').split(',')]\n bitrates.sort()\n\n formats = []\n for bitrate in bitrates:\n for link in links:\n formats.append({\n 'url': '%s%d.%s' % (link[0], bitrate, link[1]),\n 'format_id': '%s-%d' % (link[1], bitrate),\n 'vbr': bitrate,\n })\n\n subtitles = {}\n for src, src_lang in re.findall(r'<track kind=\"captions\" src=\"([^\"]+)\" srclang=\"([^\"]+)\"', webpage):\n subtitles[src_lang] = [{\n 'ext': src.split('/')[-1],\n 'url': 'http://www.funnyordie.com%s' % src,\n }]\n\n post_json = self._search_regex(\n r'fb_post\\s*=\\s*(\\{.*?\\});', webpage, 'post details')\n post = json.loads(post_json)\n\n return {\n 'id': video_id,\n 'title': post['name'],\n 'description': post.get('description'),\n 'thumbnail': post.get('picture'),\n 'formats': formats,\n 'subtitles': subtitles,\n }\n", "path": "youtube_dl/extractor/funnyordie.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport json\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import ExtractorError\n\n\nclass FunnyOrDieIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?funnyordie\\.com/(?P<type>embed|articles|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'\n _TESTS = [{\n 'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',\n 'md5': 'bcd81e0c4f26189ee09be362ad6e6ba9',\n 'info_dict': {\n 'id': '0732f586d7',\n 'ext': 'mp4',\n 'title': 'Heart-Shaped Box: Literal Video Version',\n 'description': 'md5:ea09a01bc9a1c46d9ab696c01747c338',\n 'thumbnail': 're:^http:.*\\.jpg$',\n },\n }, {\n 'url': 'http://www.funnyordie.com/embed/e402820827',\n 'info_dict': {\n 'id': 'e402820827',\n 'ext': 'mp4',\n 'title': 'Please Use This Song (Jon Lajoie)',\n 'description': 'Please use this to sell something. 
www.jonlajoie.com',\n 'thumbnail': 're:^http:.*\\.jpg$',\n },\n }, {\n 'url': 'http://www.funnyordie.com/articles/ebf5e34fc8/10-hours-of-walking-in-nyc-as-a-man',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n\n video_id = mobj.group('id')\n webpage = self._download_webpage(url, video_id)\n\n links = re.findall(r'<source src=\"([^\"]+/v)[^\"]+\\.([^\"]+)\" type=\\'video', webpage)\n if not links:\n raise ExtractorError('No media links available for %s' % video_id)\n\n links.sort(key=lambda link: 1 if link[1] == 'mp4' else 0)\n\n bitrates = self._html_search_regex(r'<source src=\"[^\"]+/v,((?:\\d+,)+)\\.mp4\\.csmil', webpage, 'video bitrates')\n bitrates = [int(b) for b in bitrates.rstrip(',').split(',')]\n bitrates.sort()\n\n formats = []\n for bitrate in bitrates:\n for link in links:\n formats.append({\n 'url': self._proto_relative_url('%s%d.%s' % (link[0], bitrate, link[1])),\n 'format_id': '%s-%d' % (link[1], bitrate),\n 'vbr': bitrate,\n })\n\n subtitles = {}\n for src, src_lang in re.findall(r'<track kind=\"captions\" src=\"([^\"]+)\" srclang=\"([^\"]+)\"', webpage):\n subtitles[src_lang] = [{\n 'ext': src.split('/')[-1],\n 'url': 'http://www.funnyordie.com%s' % src,\n }]\n\n post_json = self._search_regex(\n r'fb_post\\s*=\\s*(\\{.*?\\});', webpage, 'post details')\n post = json.loads(post_json)\n\n return {\n 'id': video_id,\n 'title': post['name'],\n 'description': post.get('description'),\n 'thumbnail': post.get('picture'),\n 'formats': formats,\n 'subtitles': subtitles,\n }\n", "path": "youtube_dl/extractor/funnyordie.py"}]}
| 2,162 | 163 |
gh_patches_debug_27210
|
rasdani/github-patches
|
git_diff
|
sql-machine-learning__elasticdl-282
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[master] Use different RPC for reporting gradient and task result.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/master/servicer.py`
Content:
```
1 import threading
2 import numpy as np
3
4 import tensorflow as tf
5 assert tf.executing_eagerly()
6
7 from proto import master_pb2
8 from proto import master_pb2_grpc
9 from util.ndarray import ndarray_to_tensor, tensor_to_ndarray
10
11
12 class MasterServicer(master_pb2_grpc.MasterServicer):
13 """Master service implementation"""
14
15 def __init__(self, logger, grads_to_wait, optimizer):
16 self.logger = logger
17 self._opt = optimizer
18 self._lock = threading.Lock()
19 # TODO: random initialization
20 # A <string, tf.ResourceVariable> map. We use tf.ResourceVariable
21 # instead ndarray to avoid copying and conversion when calling
22 # optimizer's apply_gradients() function.
23 self._model = {}
24 self._version = 0
25 self._gradient_sum = {}
26 self._grad_to_wait = grads_to_wait
27 self._grad_n = 0
28
29 def _set_model_var(self, name, value):
30 """Add or set model variable. Value should be a float32 ndarray"""
31 if value.dtype != np.float32:
32 raise ValueError("Value should be a float32 numpy array")
33 self._model[name] = tf.Variable(value, name=name)
34
35 def GetTask(self, request, context):
36 # TODO: implent task queues. Return an empty task for now.
37 res = master_pb2.Task()
38 res.shard_file_name = ""
39 res.model_version = self._version
40 return res
41
42 def GetModel(self, request, context):
43 if request.min_version > self._version:
44 err_msg = (
45 "Requested version %d not available yet, current version: %d"
46 % (request.min_version, self._version)
47 )
48 self.logger.warning(err_msg)
49 raise ValueError(err_msg)
50
51 res = master_pb2.Model()
52 with self._lock:
53 res.version = self._version
54 for k, v in self._model.items():
55 res.param[k].CopyFrom(ndarray_to_tensor(v.numpy()))
56 return res
57
58 def _update_model(self):
59 assert self._lock.locked()
60 grad_var = []
61 for k in self._gradient_sum:
62 self._gradient_sum[k] = self._gradient_sum[k] / self._grad_to_wait
63 grad_var.append((self._gradient_sum[k], self._model[k]))
64 self._opt.apply_gradients(grad_var)
65 self._version += 1
66 self._gradient_sum.clear()
67 self._grad_n = 0
68
69 def ReportTaskResult(self, request, context):
70 if request.model_version > self._version:
71 err_msg = "Model version %d out of range, current version: %d" % (
72 request.model_version,
73 self._version,
74 )
75 self.logger.warning(err_msg)
76 raise ValueError(err_msg)
77
78 res = master_pb2.ReportTaskResultReply()
79 if request.model_version < self._version:
80 self.logger.warning(
81 "Task result for outdated version %d dropped",
82 request.model_version,
83 )
84 res.accepted = False
85 res.model_version = self._version
86 return res
87
88 if request.err_message:
89 self.logger.warning("Worker error: %s" % request.err_message)
90 res.accepted = False
91 res.model_version = self._version
92 return res
93
94 # TODO: Update task queue with task_id
95 with self._lock:
96 tmp = {}
97 # Do sanity check before accumulating gradients.
98 for k, v in request.gradient.items():
99 if k not in self._model:
100 raise ValueError(
101 "Gradient key: %s is not part of model", k
102 )
103 arr = tensor_to_ndarray(v)
104 if arr.shape != self._model[k].numpy().shape:
105 raise ValueError(
106 "Gradient key: %s has incompatible dimension", k
107 )
108 tmp[k] = arr
109
110 for k, v in tmp.items():
111 if k in self._gradient_sum:
112 self._gradient_sum[k] = self._gradient_sum[k] + v
113 else:
114 self._gradient_sum[k] = v
115
116 self._grad_n += 1
117 if self._grad_n >= self._grad_to_wait:
118 self._update_model()
119 res.accepted = True
120 res.model_version = self._version
121 return res
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/elasticdl/master/servicer.py b/elasticdl/master/servicer.py
--- a/elasticdl/master/servicer.py
+++ b/elasticdl/master/servicer.py
@@ -66,7 +66,7 @@
self._gradient_sum.clear()
self._grad_n = 0
- def ReportTaskResult(self, request, context):
+ def ReportGradient(self, request, context):
if request.model_version > self._version:
err_msg = "Model version %d out of range, current version: %d" % (
request.model_version,
@@ -75,7 +75,7 @@
self.logger.warning(err_msg)
raise ValueError(err_msg)
- res = master_pb2.ReportTaskResultReply()
+ res = master_pb2.ReportGradientReply()
if request.model_version < self._version:
self.logger.warning(
"Task result for outdated version %d dropped",
@@ -85,12 +85,6 @@
res.model_version = self._version
return res
- if request.err_message:
- self.logger.warning("Worker error: %s" % request.err_message)
- res.accepted = False
- res.model_version = self._version
- return res
-
# TODO: Update task queue with task_id
with self._lock:
tmp = {}
|
{"golden_diff": "diff --git a/elasticdl/master/servicer.py b/elasticdl/master/servicer.py\n--- a/elasticdl/master/servicer.py\n+++ b/elasticdl/master/servicer.py\n@@ -66,7 +66,7 @@\n self._gradient_sum.clear()\n self._grad_n = 0\n \n- def ReportTaskResult(self, request, context):\n+ def ReportGradient(self, request, context):\n if request.model_version > self._version:\n err_msg = \"Model version %d out of range, current version: %d\" % (\n request.model_version,\n@@ -75,7 +75,7 @@\n self.logger.warning(err_msg)\n raise ValueError(err_msg)\n \n- res = master_pb2.ReportTaskResultReply()\n+ res = master_pb2.ReportGradientReply()\n if request.model_version < self._version:\n self.logger.warning(\n \"Task result for outdated version %d dropped\",\n@@ -85,12 +85,6 @@\n res.model_version = self._version\n return res\n \n- if request.err_message:\n- self.logger.warning(\"Worker error: %s\" % request.err_message)\n- res.accepted = False\n- res.model_version = self._version\n- return res\n-\n # TODO: Update task queue with task_id\n with self._lock:\n tmp = {}\n", "issue": "[master]Use different RPC for reporting gradient and task result.\n\n", "before_files": [{"content": "import threading\nimport numpy as np\n\nimport tensorflow as tf\nassert tf.executing_eagerly()\n\nfrom proto import master_pb2\nfrom proto import master_pb2_grpc\nfrom util.ndarray import ndarray_to_tensor, tensor_to_ndarray\n\n\nclass MasterServicer(master_pb2_grpc.MasterServicer):\n \"\"\"Master service implementation\"\"\"\n\n def __init__(self, logger, grads_to_wait, optimizer):\n self.logger = logger\n self._opt = optimizer\n self._lock = threading.Lock()\n # TODO: random initialization\n # A <string, tf.ResourceVariable> map. We use tf.ResourceVariable\n # instead ndarray to avoid copying and conversion when calling\n # optimizer's apply_gradients() function.\n self._model = {}\n self._version = 0\n self._gradient_sum = {}\n self._grad_to_wait = grads_to_wait\n self._grad_n = 0\n\n def _set_model_var(self, name, value):\n \"\"\"Add or set model variable. Value should be a float32 ndarray\"\"\"\n if value.dtype != np.float32:\n raise ValueError(\"Value should be a float32 numpy array\")\n self._model[name] = tf.Variable(value, name=name)\n\n def GetTask(self, request, context):\n # TODO: implent task queues. 
Return an empty task for now.\n res = master_pb2.Task()\n res.shard_file_name = \"\"\n res.model_version = self._version\n return res\n\n def GetModel(self, request, context):\n if request.min_version > self._version:\n err_msg = (\n \"Requested version %d not available yet, current version: %d\"\n % (request.min_version, self._version)\n )\n self.logger.warning(err_msg)\n raise ValueError(err_msg)\n\n res = master_pb2.Model()\n with self._lock:\n res.version = self._version\n for k, v in self._model.items():\n res.param[k].CopyFrom(ndarray_to_tensor(v.numpy()))\n return res\n\n def _update_model(self):\n assert self._lock.locked()\n grad_var = []\n for k in self._gradient_sum:\n self._gradient_sum[k] = self._gradient_sum[k] / self._grad_to_wait\n grad_var.append((self._gradient_sum[k], self._model[k]))\n self._opt.apply_gradients(grad_var)\n self._version += 1\n self._gradient_sum.clear()\n self._grad_n = 0\n\n def ReportTaskResult(self, request, context):\n if request.model_version > self._version:\n err_msg = \"Model version %d out of range, current version: %d\" % (\n request.model_version,\n self._version,\n )\n self.logger.warning(err_msg)\n raise ValueError(err_msg)\n\n res = master_pb2.ReportTaskResultReply()\n if request.model_version < self._version:\n self.logger.warning(\n \"Task result for outdated version %d dropped\",\n request.model_version,\n )\n res.accepted = False\n res.model_version = self._version\n return res\n\n if request.err_message:\n self.logger.warning(\"Worker error: %s\" % request.err_message)\n res.accepted = False\n res.model_version = self._version\n return res\n\n # TODO: Update task queue with task_id\n with self._lock:\n tmp = {}\n # Do sanity check before accumulating gradients.\n for k, v in request.gradient.items():\n if k not in self._model:\n raise ValueError(\n \"Gradient key: %s is not part of model\", k\n )\n arr = tensor_to_ndarray(v)\n if arr.shape != self._model[k].numpy().shape:\n raise ValueError(\n \"Gradient key: %s has incompatible dimension\", k\n )\n tmp[k] = arr\n\n for k, v in tmp.items():\n if k in self._gradient_sum:\n self._gradient_sum[k] = self._gradient_sum[k] + v\n else:\n self._gradient_sum[k] = v\n\n self._grad_n += 1\n if self._grad_n >= self._grad_to_wait:\n self._update_model()\n res.accepted = True\n res.model_version = self._version\n return res\n", "path": "elasticdl/master/servicer.py"}], "after_files": [{"content": "import threading\nimport numpy as np\n\nimport tensorflow as tf\nassert tf.executing_eagerly()\n\nfrom proto import master_pb2\nfrom proto import master_pb2_grpc\nfrom util.ndarray import ndarray_to_tensor, tensor_to_ndarray\n\n\nclass MasterServicer(master_pb2_grpc.MasterServicer):\n \"\"\"Master service implementation\"\"\"\n\n def __init__(self, logger, grads_to_wait, optimizer):\n self.logger = logger\n self._opt = optimizer\n self._lock = threading.Lock()\n # TODO: random initialization\n # A <string, tf.ResourceVariable> map. We use tf.ResourceVariable\n # instead ndarray to avoid copying and conversion when calling\n # optimizer's apply_gradients() function.\n self._model = {}\n self._version = 0\n self._gradient_sum = {}\n self._grad_to_wait = grads_to_wait\n self._grad_n = 0\n\n def _set_model_var(self, name, value):\n \"\"\"Add or set model variable. 
Value should be a float32 ndarray\"\"\"\n if value.dtype != np.float32:\n raise ValueError(\"Value should be a float32 numpy array\")\n self._model[name] = tf.Variable(value, name=name)\n\n def GetTask(self, request, context):\n # TODO: implent task queues. Return an empty task for now.\n res = master_pb2.Task()\n res.shard_file_name = \"\"\n res.model_version = self._version\n return res\n\n def GetModel(self, request, context):\n if request.min_version > self._version:\n err_msg = (\n \"Requested version %d not available yet, current version: %d\"\n % (request.min_version, self._version)\n )\n self.logger.warning(err_msg)\n raise ValueError(err_msg)\n\n res = master_pb2.Model()\n with self._lock:\n res.version = self._version\n for k, v in self._model.items():\n res.param[k].CopyFrom(ndarray_to_tensor(v.numpy()))\n return res\n\n def _update_model(self):\n assert self._lock.locked()\n grad_var = []\n for k in self._gradient_sum:\n self._gradient_sum[k] = self._gradient_sum[k] / self._grad_to_wait\n grad_var.append((self._gradient_sum[k], self._model[k]))\n self._opt.apply_gradients(grad_var)\n self._version += 1\n self._gradient_sum.clear()\n self._grad_n = 0\n\n def ReportGradient(self, request, context):\n if request.model_version > self._version:\n err_msg = \"Model version %d out of range, current version: %d\" % (\n request.model_version,\n self._version,\n )\n self.logger.warning(err_msg)\n raise ValueError(err_msg)\n\n res = master_pb2.ReportGradientReply()\n if request.model_version < self._version:\n self.logger.warning(\n \"Task result for outdated version %d dropped\",\n request.model_version,\n )\n res.accepted = False\n res.model_version = self._version\n return res\n\n # TODO: Update task queue with task_id\n with self._lock:\n tmp = {}\n # Do sanity check before accumulating gradients.\n for k, v in request.gradient.items():\n if k not in self._model:\n raise ValueError(\n \"Gradient key: %s is not part of model\", k\n )\n arr = tensor_to_ndarray(v)\n if arr.shape != self._model[k].numpy().shape:\n raise ValueError(\n \"Gradient key: %s has incompatible dimension\", k\n )\n tmp[k] = arr\n\n for k, v in tmp.items():\n if k in self._gradient_sum:\n self._gradient_sum[k] = self._gradient_sum[k] + v\n else:\n self._gradient_sum[k] = v\n\n self._grad_n += 1\n if self._grad_n >= self._grad_to_wait:\n self._update_model()\n res.accepted = True\n res.model_version = self._version\n return res\n", "path": "elasticdl/master/servicer.py"}]}
| 1,467 | 301 |
gh_patches_debug_23313
|
rasdani/github-patches
|
git_diff
|
jazzband__pip-tools-2082
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Output --no-binary and --only-binary options to preserve pip behavior
#### Environment Versions
1. OS Type: Linux
1. Python version: 3.12.3
1. pip version: 24.0
1. pip-tools version: 7.4.1
#### Steps to replicate
1. Compile using `--pip-args='--only-binary=:all: --no-binary=library'`
2. See that compiled requirements list the `--no-binary=library` option first, `--only-binary=:all:` option second.
3. When attempting to install from these requirements the `--no-binary` is wiped out by the `--only-binary=:all:`
#### Expected result
The resulting requirements contain `--no-binary` and `--only-binary` options that have the same behavior as input options.
#### Actual result
Requirements don't have the same behavior as input options.
This improvement matters because using --no-binary/--only-binary is the best way for users to control the amount of potential code execution that happens during the installation process. I have a local fix that I plan on creating a PR.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `piptools/writer.py`
Content:
```
1 from __future__ import annotations
2
3 import io
4 import os
5 import re
6 import sys
7 from itertools import chain
8 from typing import BinaryIO, Iterable, Iterator, cast
9
10 from click import unstyle
11 from click.core import Context
12 from pip._internal.models.format_control import FormatControl
13 from pip._internal.req.req_install import InstallRequirement
14 from pip._vendor.packaging.markers import Marker
15 from pip._vendor.packaging.utils import canonicalize_name
16
17 from .logging import log
18 from .utils import (
19 comment,
20 dedup,
21 format_requirement,
22 get_compile_command,
23 key_from_ireq,
24 strip_extras,
25 )
26
27 MESSAGE_UNHASHED_PACKAGE = comment(
28 "# WARNING: pip install will require the following package to be hashed."
29 "\n# Consider using a hashable URL like "
30 "https://github.com/jazzband/pip-tools/archive/SOMECOMMIT.zip"
31 )
32
33 MESSAGE_UNSAFE_PACKAGES_UNPINNED = comment(
34 "# WARNING: The following packages were not pinned, but pip requires them to be"
35 "\n# pinned when the requirements file includes hashes and the requirement is not"
36 "\n# satisfied by a package already installed. "
37 "Consider using the --allow-unsafe flag."
38 )
39
40 MESSAGE_UNSAFE_PACKAGES = comment(
41 "# The following packages are considered to be unsafe in a requirements file:"
42 )
43
44 MESSAGE_UNINSTALLABLE = (
45 "The generated requirements file may be rejected by pip install. "
46 "See # WARNING lines for details."
47 )
48
49
50 strip_comes_from_line_re = re.compile(r" \(line \d+\)$")
51
52
53 def _comes_from_as_string(comes_from: str | InstallRequirement) -> str:
54 if isinstance(comes_from, str):
55 return strip_comes_from_line_re.sub("", comes_from)
56 return cast(str, canonicalize_name(key_from_ireq(comes_from)))
57
58
59 def annotation_style_split(required_by: set[str]) -> str:
60 sorted_required_by = sorted(required_by)
61 if len(sorted_required_by) == 1:
62 source = sorted_required_by[0]
63 annotation = "# via " + source
64 else:
65 annotation_lines = ["# via"]
66 for source in sorted_required_by:
67 annotation_lines.append(" # " + source)
68 annotation = "\n".join(annotation_lines)
69 return annotation
70
71
72 def annotation_style_line(required_by: set[str]) -> str:
73 return f"# via {', '.join(sorted(required_by))}"
74
75
76 class OutputWriter:
77 def __init__(
78 self,
79 dst_file: BinaryIO,
80 click_ctx: Context,
81 dry_run: bool,
82 emit_header: bool,
83 emit_index_url: bool,
84 emit_trusted_host: bool,
85 annotate: bool,
86 annotation_style: str,
87 strip_extras: bool,
88 generate_hashes: bool,
89 default_index_url: str,
90 index_urls: Iterable[str],
91 trusted_hosts: Iterable[str],
92 format_control: FormatControl,
93 linesep: str,
94 allow_unsafe: bool,
95 find_links: list[str],
96 emit_find_links: bool,
97 emit_options: bool,
98 ) -> None:
99 self.dst_file = dst_file
100 self.click_ctx = click_ctx
101 self.dry_run = dry_run
102 self.emit_header = emit_header
103 self.emit_index_url = emit_index_url
104 self.emit_trusted_host = emit_trusted_host
105 self.annotate = annotate
106 self.annotation_style = annotation_style
107 self.strip_extras = strip_extras
108 self.generate_hashes = generate_hashes
109 self.default_index_url = default_index_url
110 self.index_urls = index_urls
111 self.trusted_hosts = trusted_hosts
112 self.format_control = format_control
113 self.linesep = linesep
114 self.allow_unsafe = allow_unsafe
115 self.find_links = find_links
116 self.emit_find_links = emit_find_links
117 self.emit_options = emit_options
118
119 def _sort_key(self, ireq: InstallRequirement) -> tuple[bool, str]:
120 return (not ireq.editable, key_from_ireq(ireq))
121
122 def write_header(self) -> Iterator[str]:
123 if self.emit_header:
124 yield comment("#")
125 yield comment(
126 "# This file is autogenerated by pip-compile with Python "
127 f"{sys.version_info.major}.{sys.version_info.minor}"
128 )
129 yield comment("# by the following command:")
130 yield comment("#")
131 compile_command = os.environ.get(
132 "CUSTOM_COMPILE_COMMAND"
133 ) or get_compile_command(self.click_ctx)
134 yield comment(f"# {compile_command}")
135 yield comment("#")
136
137 def write_index_options(self) -> Iterator[str]:
138 if self.emit_index_url:
139 for index, index_url in enumerate(dedup(self.index_urls)):
140 if index == 0 and index_url.rstrip("/") == self.default_index_url:
141 continue
142 flag = "--index-url" if index == 0 else "--extra-index-url"
143 yield f"{flag} {index_url}"
144
145 def write_trusted_hosts(self) -> Iterator[str]:
146 if self.emit_trusted_host:
147 for trusted_host in dedup(self.trusted_hosts):
148 yield f"--trusted-host {trusted_host}"
149
150 def write_format_controls(self) -> Iterator[str]:
151 for nb in dedup(sorted(self.format_control.no_binary)):
152 yield f"--no-binary {nb}"
153 for ob in dedup(sorted(self.format_control.only_binary)):
154 yield f"--only-binary {ob}"
155
156 def write_find_links(self) -> Iterator[str]:
157 if self.emit_find_links:
158 for find_link in dedup(self.find_links):
159 yield f"--find-links {find_link}"
160
161 def write_flags(self) -> Iterator[str]:
162 if not self.emit_options:
163 return
164 emitted = False
165 for line in chain(
166 self.write_index_options(),
167 self.write_find_links(),
168 self.write_trusted_hosts(),
169 self.write_format_controls(),
170 ):
171 emitted = True
172 yield line
173 if emitted:
174 yield ""
175
176 def _iter_lines(
177 self,
178 results: set[InstallRequirement],
179 unsafe_requirements: set[InstallRequirement],
180 unsafe_packages: set[str],
181 markers: dict[str, Marker],
182 hashes: dict[InstallRequirement, set[str]] | None = None,
183 ) -> Iterator[str]:
184 # default values
185 unsafe_packages = unsafe_packages if self.allow_unsafe else set()
186 hashes = hashes or {}
187
188 # Check for unhashed or unpinned packages if at least one package does have
189 # hashes, which will trigger pip install's --require-hashes mode.
190 warn_uninstallable = False
191 has_hashes = hashes and any(hash for hash in hashes.values())
192
193 yielded = False
194
195 for line in self.write_header():
196 yield line
197 yielded = True
198 for line in self.write_flags():
199 yield line
200 yielded = True
201
202 unsafe_requirements = unsafe_requirements or {
203 r for r in results if r.name in unsafe_packages
204 }
205 packages = {r for r in results if r.name not in unsafe_packages}
206
207 if packages:
208 for ireq in sorted(packages, key=self._sort_key):
209 if has_hashes and not hashes.get(ireq):
210 yield MESSAGE_UNHASHED_PACKAGE
211 warn_uninstallable = True
212 line = self._format_requirement(
213 ireq, markers.get(key_from_ireq(ireq)), hashes=hashes
214 )
215 yield line
216 yielded = True
217
218 if unsafe_requirements:
219 yield ""
220 yielded = True
221 if has_hashes and not self.allow_unsafe:
222 yield MESSAGE_UNSAFE_PACKAGES_UNPINNED
223 warn_uninstallable = True
224 else:
225 yield MESSAGE_UNSAFE_PACKAGES
226
227 for ireq in sorted(unsafe_requirements, key=self._sort_key):
228 ireq_key = key_from_ireq(ireq)
229 if not self.allow_unsafe:
230 yield comment(f"# {ireq_key}")
231 else:
232 line = self._format_requirement(
233 ireq, marker=markers.get(ireq_key), hashes=hashes
234 )
235 yield line
236
237 # Yield even when there's no real content, so that blank files are written
238 if not yielded:
239 yield ""
240
241 if warn_uninstallable:
242 log.warning(MESSAGE_UNINSTALLABLE)
243
244 def write(
245 self,
246 results: set[InstallRequirement],
247 unsafe_requirements: set[InstallRequirement],
248 unsafe_packages: set[str],
249 markers: dict[str, Marker],
250 hashes: dict[InstallRequirement, set[str]] | None,
251 ) -> None:
252 if not self.dry_run:
253 dst_file = io.TextIOWrapper(
254 self.dst_file,
255 encoding="utf8",
256 newline=self.linesep,
257 line_buffering=True,
258 )
259 try:
260 for line in self._iter_lines(
261 results, unsafe_requirements, unsafe_packages, markers, hashes
262 ):
263 if self.dry_run:
264 # Bypass the log level to always print this during a dry run
265 log.log(line)
266 else:
267 log.info(line)
268 dst_file.write(unstyle(line))
269 dst_file.write("\n")
270 finally:
271 if not self.dry_run:
272 dst_file.detach()
273
274 def _format_requirement(
275 self,
276 ireq: InstallRequirement,
277 marker: Marker | None = None,
278 hashes: dict[InstallRequirement, set[str]] | None = None,
279 ) -> str:
280 ireq_hashes = (hashes if hashes is not None else {}).get(ireq)
281
282 line = format_requirement(ireq, marker=marker, hashes=ireq_hashes)
283 if self.strip_extras:
284 line = strip_extras(line)
285
286 if not self.annotate:
287 return line
288
289 # Annotate what packages or reqs-ins this package is required by
290 required_by = set()
291 if hasattr(ireq, "_source_ireqs"):
292 required_by |= {
293 _comes_from_as_string(src_ireq.comes_from)
294 for src_ireq in ireq._source_ireqs
295 if src_ireq.comes_from
296 }
297
298 # Filter out the origin install requirements for extras.
299 # See https://github.com/jazzband/pip-tools/issues/2003
300 if ireq.comes_from and (
301 isinstance(ireq.comes_from, str) or ireq.comes_from.name != ireq.name
302 ):
303 required_by.add(_comes_from_as_string(ireq.comes_from))
304
305 required_by |= set(getattr(ireq, "_required_by", set()))
306
307 if required_by:
308 if self.annotation_style == "split":
309 annotation = annotation_style_split(required_by)
310 sep = "\n "
311 elif self.annotation_style == "line":
312 annotation = annotation_style_line(required_by)
313 sep = "\n " if ireq_hashes else " "
314 else: # pragma: no cover
315 raise ValueError("Invalid value for annotation style")
316 if self.strip_extras:
317 annotation = strip_extras(annotation)
318 # 24 is one reasonable column size to use here, that we've used in the past
319 lines = f"{line:24}{sep}{comment(annotation)}".splitlines()
320 line = "\n".join(ln.rstrip() for ln in lines)
321
322 return line
323
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/piptools/writer.py b/piptools/writer.py
--- a/piptools/writer.py
+++ b/piptools/writer.py
@@ -148,9 +148,27 @@
yield f"--trusted-host {trusted_host}"
def write_format_controls(self) -> Iterator[str]:
- for nb in dedup(sorted(self.format_control.no_binary)):
+ # The ordering of output needs to preserve the behavior of pip's
+ # FormatControl.get_allowed_formats(). The behavior is the following:
+ #
+ # * Parsing of CLI options happens first to last.
+ # * --only-binary takes precedence over --no-binary
+ # * Package names take precedence over :all:
+ # * We'll never see :all: in both due to mutual exclusion.
+ #
+ # So in summary, we want to emit :all: first and then package names later.
+ no_binary = self.format_control.no_binary.copy()
+ only_binary = self.format_control.only_binary.copy()
+
+ if ":all:" in no_binary:
+ yield "--no-binary :all:"
+ no_binary.remove(":all:")
+ if ":all:" in only_binary:
+ yield "--only-binary :all:"
+ only_binary.remove(":all:")
+ for nb in dedup(sorted(no_binary)):
yield f"--no-binary {nb}"
- for ob in dedup(sorted(self.format_control.only_binary)):
+ for ob in dedup(sorted(only_binary)):
yield f"--only-binary {ob}"
def write_find_links(self) -> Iterator[str]:
|
{"golden_diff": "diff --git a/piptools/writer.py b/piptools/writer.py\n--- a/piptools/writer.py\n+++ b/piptools/writer.py\n@@ -148,9 +148,27 @@\n yield f\"--trusted-host {trusted_host}\"\n \n def write_format_controls(self) -> Iterator[str]:\n- for nb in dedup(sorted(self.format_control.no_binary)):\n+ # The ordering of output needs to preserve the behavior of pip's\n+ # FormatControl.get_allowed_formats(). The behavior is the following:\n+ #\n+ # * Parsing of CLI options happens first to last.\n+ # * --only-binary takes precedence over --no-binary\n+ # * Package names take precedence over :all:\n+ # * We'll never see :all: in both due to mutual exclusion.\n+ #\n+ # So in summary, we want to emit :all: first and then package names later.\n+ no_binary = self.format_control.no_binary.copy()\n+ only_binary = self.format_control.only_binary.copy()\n+\n+ if \":all:\" in no_binary:\n+ yield \"--no-binary :all:\"\n+ no_binary.remove(\":all:\")\n+ if \":all:\" in only_binary:\n+ yield \"--only-binary :all:\"\n+ only_binary.remove(\":all:\")\n+ for nb in dedup(sorted(no_binary)):\n yield f\"--no-binary {nb}\"\n- for ob in dedup(sorted(self.format_control.only_binary)):\n+ for ob in dedup(sorted(only_binary)):\n yield f\"--only-binary {ob}\"\n \n def write_find_links(self) -> Iterator[str]:\n", "issue": "Output --no-binary and --only-binary options to preserve pip behavior\n<!-- Describe the issue briefly here. -->\r\n\r\n#### Environment Versions\r\n\r\n1. OS Type: Linux\r\n1. Python version: 3.12.3\r\n1. pip version: 24.0\r\n1. pip-tools version: 7.4.1\r\n\r\n#### Steps to replicate\r\n\r\n1. Compile using `--pip-args='--only-binary=:all: --no-binary=library'\r\n2. See that compiled requirements list the `--no-binary=library` option first, `--only-binary=:all:` option second.\r\n3. When attempting to install from these requirements the `--no-binary` is wiped out by the `--only-binary=:all:`\r\n\r\n#### Expected result\r\n\r\nThe resulting requirements contain `--no-binary` and `--only-binary` options that have the same behavior as input options.\r\n\r\n#### Actual result\r\n\r\nRequirements don't have the same behavior as input options.\r\n\r\nThis improvement matters because using --no-binary/--only-binary is the best way for users to control the amount of potential code execution that happens during the installation process. 
I have a local fix that I plan on creating a PR.\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport io\nimport os\nimport re\nimport sys\nfrom itertools import chain\nfrom typing import BinaryIO, Iterable, Iterator, cast\n\nfrom click import unstyle\nfrom click.core import Context\nfrom pip._internal.models.format_control import FormatControl\nfrom pip._internal.req.req_install import InstallRequirement\nfrom pip._vendor.packaging.markers import Marker\nfrom pip._vendor.packaging.utils import canonicalize_name\n\nfrom .logging import log\nfrom .utils import (\n comment,\n dedup,\n format_requirement,\n get_compile_command,\n key_from_ireq,\n strip_extras,\n)\n\nMESSAGE_UNHASHED_PACKAGE = comment(\n \"# WARNING: pip install will require the following package to be hashed.\"\n \"\\n# Consider using a hashable URL like \"\n \"https://github.com/jazzband/pip-tools/archive/SOMECOMMIT.zip\"\n)\n\nMESSAGE_UNSAFE_PACKAGES_UNPINNED = comment(\n \"# WARNING: The following packages were not pinned, but pip requires them to be\"\n \"\\n# pinned when the requirements file includes hashes and the requirement is not\"\n \"\\n# satisfied by a package already installed. \"\n \"Consider using the --allow-unsafe flag.\"\n)\n\nMESSAGE_UNSAFE_PACKAGES = comment(\n \"# The following packages are considered to be unsafe in a requirements file:\"\n)\n\nMESSAGE_UNINSTALLABLE = (\n \"The generated requirements file may be rejected by pip install. \"\n \"See # WARNING lines for details.\"\n)\n\n\nstrip_comes_from_line_re = re.compile(r\" \\(line \\d+\\)$\")\n\n\ndef _comes_from_as_string(comes_from: str | InstallRequirement) -> str:\n if isinstance(comes_from, str):\n return strip_comes_from_line_re.sub(\"\", comes_from)\n return cast(str, canonicalize_name(key_from_ireq(comes_from)))\n\n\ndef annotation_style_split(required_by: set[str]) -> str:\n sorted_required_by = sorted(required_by)\n if len(sorted_required_by) == 1:\n source = sorted_required_by[0]\n annotation = \"# via \" + source\n else:\n annotation_lines = [\"# via\"]\n for source in sorted_required_by:\n annotation_lines.append(\" # \" + source)\n annotation = \"\\n\".join(annotation_lines)\n return annotation\n\n\ndef annotation_style_line(required_by: set[str]) -> str:\n return f\"# via {', '.join(sorted(required_by))}\"\n\n\nclass OutputWriter:\n def __init__(\n self,\n dst_file: BinaryIO,\n click_ctx: Context,\n dry_run: bool,\n emit_header: bool,\n emit_index_url: bool,\n emit_trusted_host: bool,\n annotate: bool,\n annotation_style: str,\n strip_extras: bool,\n generate_hashes: bool,\n default_index_url: str,\n index_urls: Iterable[str],\n trusted_hosts: Iterable[str],\n format_control: FormatControl,\n linesep: str,\n allow_unsafe: bool,\n find_links: list[str],\n emit_find_links: bool,\n emit_options: bool,\n ) -> None:\n self.dst_file = dst_file\n self.click_ctx = click_ctx\n self.dry_run = dry_run\n self.emit_header = emit_header\n self.emit_index_url = emit_index_url\n self.emit_trusted_host = emit_trusted_host\n self.annotate = annotate\n self.annotation_style = annotation_style\n self.strip_extras = strip_extras\n self.generate_hashes = generate_hashes\n self.default_index_url = default_index_url\n self.index_urls = index_urls\n self.trusted_hosts = trusted_hosts\n self.format_control = format_control\n self.linesep = linesep\n self.allow_unsafe = allow_unsafe\n self.find_links = find_links\n self.emit_find_links = emit_find_links\n self.emit_options = emit_options\n\n def _sort_key(self, ireq: 
InstallRequirement) -> tuple[bool, str]:\n return (not ireq.editable, key_from_ireq(ireq))\n\n def write_header(self) -> Iterator[str]:\n if self.emit_header:\n yield comment(\"#\")\n yield comment(\n \"# This file is autogenerated by pip-compile with Python \"\n f\"{sys.version_info.major}.{sys.version_info.minor}\"\n )\n yield comment(\"# by the following command:\")\n yield comment(\"#\")\n compile_command = os.environ.get(\n \"CUSTOM_COMPILE_COMMAND\"\n ) or get_compile_command(self.click_ctx)\n yield comment(f\"# {compile_command}\")\n yield comment(\"#\")\n\n def write_index_options(self) -> Iterator[str]:\n if self.emit_index_url:\n for index, index_url in enumerate(dedup(self.index_urls)):\n if index == 0 and index_url.rstrip(\"/\") == self.default_index_url:\n continue\n flag = \"--index-url\" if index == 0 else \"--extra-index-url\"\n yield f\"{flag} {index_url}\"\n\n def write_trusted_hosts(self) -> Iterator[str]:\n if self.emit_trusted_host:\n for trusted_host in dedup(self.trusted_hosts):\n yield f\"--trusted-host {trusted_host}\"\n\n def write_format_controls(self) -> Iterator[str]:\n for nb in dedup(sorted(self.format_control.no_binary)):\n yield f\"--no-binary {nb}\"\n for ob in dedup(sorted(self.format_control.only_binary)):\n yield f\"--only-binary {ob}\"\n\n def write_find_links(self) -> Iterator[str]:\n if self.emit_find_links:\n for find_link in dedup(self.find_links):\n yield f\"--find-links {find_link}\"\n\n def write_flags(self) -> Iterator[str]:\n if not self.emit_options:\n return\n emitted = False\n for line in chain(\n self.write_index_options(),\n self.write_find_links(),\n self.write_trusted_hosts(),\n self.write_format_controls(),\n ):\n emitted = True\n yield line\n if emitted:\n yield \"\"\n\n def _iter_lines(\n self,\n results: set[InstallRequirement],\n unsafe_requirements: set[InstallRequirement],\n unsafe_packages: set[str],\n markers: dict[str, Marker],\n hashes: dict[InstallRequirement, set[str]] | None = None,\n ) -> Iterator[str]:\n # default values\n unsafe_packages = unsafe_packages if self.allow_unsafe else set()\n hashes = hashes or {}\n\n # Check for unhashed or unpinned packages if at least one package does have\n # hashes, which will trigger pip install's --require-hashes mode.\n warn_uninstallable = False\n has_hashes = hashes and any(hash for hash in hashes.values())\n\n yielded = False\n\n for line in self.write_header():\n yield line\n yielded = True\n for line in self.write_flags():\n yield line\n yielded = True\n\n unsafe_requirements = unsafe_requirements or {\n r for r in results if r.name in unsafe_packages\n }\n packages = {r for r in results if r.name not in unsafe_packages}\n\n if packages:\n for ireq in sorted(packages, key=self._sort_key):\n if has_hashes and not hashes.get(ireq):\n yield MESSAGE_UNHASHED_PACKAGE\n warn_uninstallable = True\n line = self._format_requirement(\n ireq, markers.get(key_from_ireq(ireq)), hashes=hashes\n )\n yield line\n yielded = True\n\n if unsafe_requirements:\n yield \"\"\n yielded = True\n if has_hashes and not self.allow_unsafe:\n yield MESSAGE_UNSAFE_PACKAGES_UNPINNED\n warn_uninstallable = True\n else:\n yield MESSAGE_UNSAFE_PACKAGES\n\n for ireq in sorted(unsafe_requirements, key=self._sort_key):\n ireq_key = key_from_ireq(ireq)\n if not self.allow_unsafe:\n yield comment(f\"# {ireq_key}\")\n else:\n line = self._format_requirement(\n ireq, marker=markers.get(ireq_key), hashes=hashes\n )\n yield line\n\n # Yield even when there's no real content, so that blank files are written\n if not 
yielded:\n yield \"\"\n\n if warn_uninstallable:\n log.warning(MESSAGE_UNINSTALLABLE)\n\n def write(\n self,\n results: set[InstallRequirement],\n unsafe_requirements: set[InstallRequirement],\n unsafe_packages: set[str],\n markers: dict[str, Marker],\n hashes: dict[InstallRequirement, set[str]] | None,\n ) -> None:\n if not self.dry_run:\n dst_file = io.TextIOWrapper(\n self.dst_file,\n encoding=\"utf8\",\n newline=self.linesep,\n line_buffering=True,\n )\n try:\n for line in self._iter_lines(\n results, unsafe_requirements, unsafe_packages, markers, hashes\n ):\n if self.dry_run:\n # Bypass the log level to always print this during a dry run\n log.log(line)\n else:\n log.info(line)\n dst_file.write(unstyle(line))\n dst_file.write(\"\\n\")\n finally:\n if not self.dry_run:\n dst_file.detach()\n\n def _format_requirement(\n self,\n ireq: InstallRequirement,\n marker: Marker | None = None,\n hashes: dict[InstallRequirement, set[str]] | None = None,\n ) -> str:\n ireq_hashes = (hashes if hashes is not None else {}).get(ireq)\n\n line = format_requirement(ireq, marker=marker, hashes=ireq_hashes)\n if self.strip_extras:\n line = strip_extras(line)\n\n if not self.annotate:\n return line\n\n # Annotate what packages or reqs-ins this package is required by\n required_by = set()\n if hasattr(ireq, \"_source_ireqs\"):\n required_by |= {\n _comes_from_as_string(src_ireq.comes_from)\n for src_ireq in ireq._source_ireqs\n if src_ireq.comes_from\n }\n\n # Filter out the origin install requirements for extras.\n # See https://github.com/jazzband/pip-tools/issues/2003\n if ireq.comes_from and (\n isinstance(ireq.comes_from, str) or ireq.comes_from.name != ireq.name\n ):\n required_by.add(_comes_from_as_string(ireq.comes_from))\n\n required_by |= set(getattr(ireq, \"_required_by\", set()))\n\n if required_by:\n if self.annotation_style == \"split\":\n annotation = annotation_style_split(required_by)\n sep = \"\\n \"\n elif self.annotation_style == \"line\":\n annotation = annotation_style_line(required_by)\n sep = \"\\n \" if ireq_hashes else \" \"\n else: # pragma: no cover\n raise ValueError(\"Invalid value for annotation style\")\n if self.strip_extras:\n annotation = strip_extras(annotation)\n # 24 is one reasonable column size to use here, that we've used in the past\n lines = f\"{line:24}{sep}{comment(annotation)}\".splitlines()\n line = \"\\n\".join(ln.rstrip() for ln in lines)\n\n return line\n", "path": "piptools/writer.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport io\nimport os\nimport re\nimport sys\nfrom itertools import chain\nfrom typing import BinaryIO, Iterable, Iterator, cast\n\nfrom click import unstyle\nfrom click.core import Context\nfrom pip._internal.models.format_control import FormatControl\nfrom pip._internal.req.req_install import InstallRequirement\nfrom pip._vendor.packaging.markers import Marker\nfrom pip._vendor.packaging.utils import canonicalize_name\n\nfrom .logging import log\nfrom .utils import (\n comment,\n dedup,\n format_requirement,\n get_compile_command,\n key_from_ireq,\n strip_extras,\n)\n\nMESSAGE_UNHASHED_PACKAGE = comment(\n \"# WARNING: pip install will require the following package to be hashed.\"\n \"\\n# Consider using a hashable URL like \"\n \"https://github.com/jazzband/pip-tools/archive/SOMECOMMIT.zip\"\n)\n\nMESSAGE_UNSAFE_PACKAGES_UNPINNED = comment(\n \"# WARNING: The following packages were not pinned, but pip requires them to be\"\n \"\\n# pinned when the requirements file includes hashes and the requirement is 
not\"\n \"\\n# satisfied by a package already installed. \"\n \"Consider using the --allow-unsafe flag.\"\n)\n\nMESSAGE_UNSAFE_PACKAGES = comment(\n \"# The following packages are considered to be unsafe in a requirements file:\"\n)\n\nMESSAGE_UNINSTALLABLE = (\n \"The generated requirements file may be rejected by pip install. \"\n \"See # WARNING lines for details.\"\n)\n\n\nstrip_comes_from_line_re = re.compile(r\" \\(line \\d+\\)$\")\n\n\ndef _comes_from_as_string(comes_from: str | InstallRequirement) -> str:\n if isinstance(comes_from, str):\n return strip_comes_from_line_re.sub(\"\", comes_from)\n return cast(str, canonicalize_name(key_from_ireq(comes_from)))\n\n\ndef annotation_style_split(required_by: set[str]) -> str:\n sorted_required_by = sorted(required_by)\n if len(sorted_required_by) == 1:\n source = sorted_required_by[0]\n annotation = \"# via \" + source\n else:\n annotation_lines = [\"# via\"]\n for source in sorted_required_by:\n annotation_lines.append(\" # \" + source)\n annotation = \"\\n\".join(annotation_lines)\n return annotation\n\n\ndef annotation_style_line(required_by: set[str]) -> str:\n return f\"# via {', '.join(sorted(required_by))}\"\n\n\nclass OutputWriter:\n def __init__(\n self,\n dst_file: BinaryIO,\n click_ctx: Context,\n dry_run: bool,\n emit_header: bool,\n emit_index_url: bool,\n emit_trusted_host: bool,\n annotate: bool,\n annotation_style: str,\n strip_extras: bool,\n generate_hashes: bool,\n default_index_url: str,\n index_urls: Iterable[str],\n trusted_hosts: Iterable[str],\n format_control: FormatControl,\n linesep: str,\n allow_unsafe: bool,\n find_links: list[str],\n emit_find_links: bool,\n emit_options: bool,\n ) -> None:\n self.dst_file = dst_file\n self.click_ctx = click_ctx\n self.dry_run = dry_run\n self.emit_header = emit_header\n self.emit_index_url = emit_index_url\n self.emit_trusted_host = emit_trusted_host\n self.annotate = annotate\n self.annotation_style = annotation_style\n self.strip_extras = strip_extras\n self.generate_hashes = generate_hashes\n self.default_index_url = default_index_url\n self.index_urls = index_urls\n self.trusted_hosts = trusted_hosts\n self.format_control = format_control\n self.linesep = linesep\n self.allow_unsafe = allow_unsafe\n self.find_links = find_links\n self.emit_find_links = emit_find_links\n self.emit_options = emit_options\n\n def _sort_key(self, ireq: InstallRequirement) -> tuple[bool, str]:\n return (not ireq.editable, key_from_ireq(ireq))\n\n def write_header(self) -> Iterator[str]:\n if self.emit_header:\n yield comment(\"#\")\n yield comment(\n \"# This file is autogenerated by pip-compile with Python \"\n f\"{sys.version_info.major}.{sys.version_info.minor}\"\n )\n yield comment(\"# by the following command:\")\n yield comment(\"#\")\n compile_command = os.environ.get(\n \"CUSTOM_COMPILE_COMMAND\"\n ) or get_compile_command(self.click_ctx)\n yield comment(f\"# {compile_command}\")\n yield comment(\"#\")\n\n def write_index_options(self) -> Iterator[str]:\n if self.emit_index_url:\n for index, index_url in enumerate(dedup(self.index_urls)):\n if index == 0 and index_url.rstrip(\"/\") == self.default_index_url:\n continue\n flag = \"--index-url\" if index == 0 else \"--extra-index-url\"\n yield f\"{flag} {index_url}\"\n\n def write_trusted_hosts(self) -> Iterator[str]:\n if self.emit_trusted_host:\n for trusted_host in dedup(self.trusted_hosts):\n yield f\"--trusted-host {trusted_host}\"\n\n def write_format_controls(self) -> Iterator[str]:\n # The ordering of output needs to preserve 
the behavior of pip's\n # FormatControl.get_allowed_formats(). The behavior is the following:\n #\n # * Parsing of CLI options happens first to last.\n # * --only-binary takes precedence over --no-binary\n # * Package names take precedence over :all:\n # * We'll never see :all: in both due to mutual exclusion.\n #\n # So in summary, we want to emit :all: first and then package names later.\n no_binary = self.format_control.no_binary.copy()\n only_binary = self.format_control.only_binary.copy()\n\n if \":all:\" in no_binary:\n yield \"--no-binary :all:\"\n no_binary.remove(\":all:\")\n if \":all:\" in only_binary:\n yield \"--only-binary :all:\"\n only_binary.remove(\":all:\")\n for nb in dedup(sorted(no_binary)):\n yield f\"--no-binary {nb}\"\n for ob in dedup(sorted(only_binary)):\n yield f\"--only-binary {ob}\"\n\n def write_find_links(self) -> Iterator[str]:\n if self.emit_find_links:\n for find_link in dedup(self.find_links):\n yield f\"--find-links {find_link}\"\n\n def write_flags(self) -> Iterator[str]:\n if not self.emit_options:\n return\n emitted = False\n for line in chain(\n self.write_index_options(),\n self.write_find_links(),\n self.write_trusted_hosts(),\n self.write_format_controls(),\n ):\n emitted = True\n yield line\n if emitted:\n yield \"\"\n\n def _iter_lines(\n self,\n results: set[InstallRequirement],\n unsafe_requirements: set[InstallRequirement],\n unsafe_packages: set[str],\n markers: dict[str, Marker],\n hashes: dict[InstallRequirement, set[str]] | None = None,\n ) -> Iterator[str]:\n # default values\n unsafe_packages = unsafe_packages if self.allow_unsafe else set()\n hashes = hashes or {}\n\n # Check for unhashed or unpinned packages if at least one package does have\n # hashes, which will trigger pip install's --require-hashes mode.\n warn_uninstallable = False\n has_hashes = hashes and any(hash for hash in hashes.values())\n\n yielded = False\n\n for line in self.write_header():\n yield line\n yielded = True\n for line in self.write_flags():\n yield line\n yielded = True\n\n unsafe_requirements = unsafe_requirements or {\n r for r in results if r.name in unsafe_packages\n }\n packages = {r for r in results if r.name not in unsafe_packages}\n\n if packages:\n for ireq in sorted(packages, key=self._sort_key):\n if has_hashes and not hashes.get(ireq):\n yield MESSAGE_UNHASHED_PACKAGE\n warn_uninstallable = True\n line = self._format_requirement(\n ireq, markers.get(key_from_ireq(ireq)), hashes=hashes\n )\n yield line\n yielded = True\n\n if unsafe_requirements:\n yield \"\"\n yielded = True\n if has_hashes and not self.allow_unsafe:\n yield MESSAGE_UNSAFE_PACKAGES_UNPINNED\n warn_uninstallable = True\n else:\n yield MESSAGE_UNSAFE_PACKAGES\n\n for ireq in sorted(unsafe_requirements, key=self._sort_key):\n ireq_key = key_from_ireq(ireq)\n if not self.allow_unsafe:\n yield comment(f\"# {ireq_key}\")\n else:\n line = self._format_requirement(\n ireq, marker=markers.get(ireq_key), hashes=hashes\n )\n yield line\n\n # Yield even when there's no real content, so that blank files are written\n if not yielded:\n yield \"\"\n\n if warn_uninstallable:\n log.warning(MESSAGE_UNINSTALLABLE)\n\n def write(\n self,\n results: set[InstallRequirement],\n unsafe_requirements: set[InstallRequirement],\n unsafe_packages: set[str],\n markers: dict[str, Marker],\n hashes: dict[InstallRequirement, set[str]] | None,\n ) -> None:\n if not self.dry_run:\n dst_file = io.TextIOWrapper(\n self.dst_file,\n encoding=\"utf8\",\n newline=self.linesep,\n line_buffering=True,\n )\n try:\n for 
line in self._iter_lines(\n results, unsafe_requirements, unsafe_packages, markers, hashes\n ):\n if self.dry_run:\n # Bypass the log level to always print this during a dry run\n log.log(line)\n else:\n log.info(line)\n dst_file.write(unstyle(line))\n dst_file.write(\"\\n\")\n finally:\n if not self.dry_run:\n dst_file.detach()\n\n def _format_requirement(\n self,\n ireq: InstallRequirement,\n marker: Marker | None = None,\n hashes: dict[InstallRequirement, set[str]] | None = None,\n ) -> str:\n ireq_hashes = (hashes if hashes is not None else {}).get(ireq)\n\n line = format_requirement(ireq, marker=marker, hashes=ireq_hashes)\n if self.strip_extras:\n line = strip_extras(line)\n\n if not self.annotate:\n return line\n\n # Annotate what packages or reqs-ins this package is required by\n required_by = set()\n if hasattr(ireq, \"_source_ireqs\"):\n required_by |= {\n _comes_from_as_string(src_ireq.comes_from)\n for src_ireq in ireq._source_ireqs\n if src_ireq.comes_from\n }\n\n # Filter out the origin install requirements for extras.\n # See https://github.com/jazzband/pip-tools/issues/2003\n if ireq.comes_from and (\n isinstance(ireq.comes_from, str) or ireq.comes_from.name != ireq.name\n ):\n required_by.add(_comes_from_as_string(ireq.comes_from))\n\n required_by |= set(getattr(ireq, \"_required_by\", set()))\n\n if required_by:\n if self.annotation_style == \"split\":\n annotation = annotation_style_split(required_by)\n sep = \"\\n \"\n elif self.annotation_style == \"line\":\n annotation = annotation_style_line(required_by)\n sep = \"\\n \" if ireq_hashes else \" \"\n else: # pragma: no cover\n raise ValueError(\"Invalid value for annotation style\")\n if self.strip_extras:\n annotation = strip_extras(annotation)\n # 24 is one reasonable column size to use here, that we've used in the past\n lines = f\"{line:24}{sep}{comment(annotation)}\".splitlines()\n line = \"\\n\".join(ln.rstrip() for ln in lines)\n\n return line\n", "path": "piptools/writer.py"}]}
| 3,801 | 352 |
gh_patches_debug_61519
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmpose-1906
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
little config error in 1.x
mmpose/tree/1.x/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py
mobilenetv2 out_channels is 1280; however, "in_channels" of the head is 2048 in this config file.
--- END ISSUE ---
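For context, the mismatch is easy to confirm empirically. The sketch below instantiates the backbone with the same arguments the config uses and inspects the feature width; the import path and exact return type are assumptions about mmpose 1.x rather than part of the issue.
```python
# Quick sanity check of the backbone's output width (a sketch; assumes the
# mmpose 1.x import path below and a local install of mmpose + torch).
import torch
from mmpose.models.backbones import MobileNetV2

backbone = MobileNetV2(widen_factor=1.0, out_indices=(7,))  # same args as the config
backbone.eval()
with torch.no_grad():
    feats = backbone(torch.randn(1, 3, 256, 256))
out = feats[-1] if isinstance(feats, (list, tuple)) else feats
print(out.shape)  # expected torch.Size([1, 1280, 8, 8]) -> 1280 channels, not 2048
```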
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py`
Content:
```
1 _base_ = ['../../../_base_/default_runtime.py']
2
3 # runtime
4 train_cfg = dict(max_epochs=210, val_interval=10)
5
6 # optimizer
7 optim_wrapper = dict(optimizer=dict(
8 type='Adam',
9 lr=5e-4,
10 ))
11
12 # learning policy
13 param_scheduler = [
14 dict(
15 type='LinearLR', begin=0, end=500, start_factor=0.001,
16 by_epoch=False), # warm-up
17 dict(
18 type='MultiStepLR',
19 begin=0,
20 end=210,
21 milestones=[170, 200],
22 gamma=0.1,
23 by_epoch=True)
24 ]
25
26 # automatically scaling LR based on the actual training batch size
27 auto_scale_lr = dict(base_batch_size=256)
28
29 # hooks
30 default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater'))
31 # codec settings
32 codec = dict(
33 type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2)
34
35 # model settings
36 model = dict(
37 type='TopdownPoseEstimator',
38 data_preprocessor=dict(
39 type='PoseDataPreprocessor',
40 mean=[123.675, 116.28, 103.53],
41 std=[58.395, 57.12, 57.375],
42 bgr_to_rgb=True),
43 backbone=dict(
44 type='MobileNetV2',
45 widen_factor=1.,
46 out_indices=(7, ),
47 init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')),
48 head=dict(
49 type='HeatmapHead',
50 in_channels=2048,
51 out_channels=21,
52 loss=dict(type='KeypointMSELoss', use_target_weight=True),
53 decoder=codec),
54 test_cfg=dict(
55 flip_test=True,
56 flip_mode='heatmap',
57 shift_heatmap=True,
58 ))
59
60 # base dataset settings
61 dataset_type = 'CocoWholeBodyHandDataset'
62 data_mode = 'topdown'
63 data_root = 'data/coco/'
64
65 # pipelines
66 train_pipeline = [
67 dict(type='LoadImage', file_client_args={{_base_.file_client_args}}),
68 dict(type='GetBBoxCenterScale'),
69 dict(
70 type='RandomBBoxTransform', rotate_factor=180,
71 scale_factor=(0.7, 1.3)),
72 dict(type='RandomFlip', direction='horizontal'),
73 dict(type='TopdownAffine', input_size=codec['input_size']),
74 dict(type='GenerateTarget', encoder=codec),
75 dict(type='PackPoseInputs')
76 ]
77 val_pipeline = [
78 dict(type='LoadImage', file_client_args={{_base_.file_client_args}}),
79 dict(type='GetBBoxCenterScale'),
80 dict(type='TopdownAffine', input_size=codec['input_size']),
81 dict(type='PackPoseInputs')
82 ]
83
84 # data loaders
85 train_dataloader = dict(
86 batch_size=32,
87 num_workers=2,
88 persistent_workers=True,
89 sampler=dict(type='DefaultSampler', shuffle=True),
90 dataset=dict(
91 type=dataset_type,
92 data_root=data_root,
93 data_mode=data_mode,
94 ann_file='annotations/coco_wholebody_train_v1.0.json',
95 data_prefix=dict(img='train2017/'),
96 pipeline=train_pipeline,
97 ))
98 val_dataloader = dict(
99 batch_size=32,
100 num_workers=2,
101 persistent_workers=True,
102 drop_last=False,
103 sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
104 dataset=dict(
105 type=dataset_type,
106 data_root=data_root,
107 data_mode=data_mode,
108 ann_file='annotations/coco_wholebody_val_v1.0.json',
109 data_prefix=dict(img='val2017/'),
110 test_mode=True,
111 pipeline=val_pipeline,
112 ))
113 test_dataloader = val_dataloader
114
115 val_evaluator = [
116 dict(type='PCKAccuracy', thr=0.2),
117 dict(type='AUC'),
118 dict(type='EPE')
119 ]
120 test_evaluator = val_evaluator
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py
--- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py
+++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py
@@ -47,7 +47,7 @@
init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')),
head=dict(
type='HeatmapHead',
- in_channels=2048,
+ in_channels=1280,
out_channels=21,
loss=dict(type='KeypointMSELoss', use_target_weight=True),
decoder=codec),
|
{"golden_diff": "diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py\n--- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py\n+++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py\n@@ -47,7 +47,7 @@\n init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')),\n head=dict(\n type='HeatmapHead',\n- in_channels=2048,\n+ in_channels=1280,\n out_channels=21,\n loss=dict(type='KeypointMSELoss', use_target_weight=True),\n decoder=codec),\n", "issue": "little config error in 1.x\n\r\nmmpose/tree/1.x/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand)/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py\r\n\r\nmobilenetv2 out_channels is 1280, however \"in_channles\" of the head is 2048 in this config file. \r\n\n", "before_files": [{"content": "_base_ = ['../../../_base_/default_runtime.py']\n\n# runtime\ntrain_cfg = dict(max_epochs=210, val_interval=10)\n\n# optimizer\noptim_wrapper = dict(optimizer=dict(\n type='Adam',\n lr=5e-4,\n))\n\n# learning policy\nparam_scheduler = [\n dict(\n type='LinearLR', begin=0, end=500, start_factor=0.001,\n by_epoch=False), # warm-up\n dict(\n type='MultiStepLR',\n begin=0,\n end=210,\n milestones=[170, 200],\n gamma=0.1,\n by_epoch=True)\n]\n\n# automatically scaling LR based on the actual training batch size\nauto_scale_lr = dict(base_batch_size=256)\n\n# hooks\ndefault_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater'))\n# codec settings\ncodec = dict(\n type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2)\n\n# model settings\nmodel = dict(\n type='TopdownPoseEstimator',\n data_preprocessor=dict(\n type='PoseDataPreprocessor',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n bgr_to_rgb=True),\n backbone=dict(\n type='MobileNetV2',\n widen_factor=1.,\n out_indices=(7, ),\n init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')),\n head=dict(\n type='HeatmapHead',\n in_channels=2048,\n out_channels=21,\n loss=dict(type='KeypointMSELoss', use_target_weight=True),\n decoder=codec),\n test_cfg=dict(\n flip_test=True,\n flip_mode='heatmap',\n shift_heatmap=True,\n ))\n\n# base dataset settings\ndataset_type = 'CocoWholeBodyHandDataset'\ndata_mode = 'topdown'\ndata_root = 'data/coco/'\n\n# pipelines\ntrain_pipeline = [\n dict(type='LoadImage', file_client_args={{_base_.file_client_args}}),\n dict(type='GetBBoxCenterScale'),\n dict(\n type='RandomBBoxTransform', rotate_factor=180,\n scale_factor=(0.7, 1.3)),\n dict(type='RandomFlip', direction='horizontal'),\n dict(type='TopdownAffine', input_size=codec['input_size']),\n dict(type='GenerateTarget', encoder=codec),\n dict(type='PackPoseInputs')\n]\nval_pipeline = [\n dict(type='LoadImage', file_client_args={{_base_.file_client_args}}),\n dict(type='GetBBoxCenterScale'),\n dict(type='TopdownAffine', input_size=codec['input_size']),\n dict(type='PackPoseInputs')\n]\n\n# data loaders\ntrain_dataloader = dict(\n batch_size=32,\n num_workers=2,\n persistent_workers=True,\n sampler=dict(type='DefaultSampler', shuffle=True),\n dataset=dict(\n type=dataset_type,\n data_root=data_root,\n data_mode=data_mode,\n ann_file='annotations/coco_wholebody_train_v1.0.json',\n 
data_prefix=dict(img='train2017/'),\n pipeline=train_pipeline,\n ))\nval_dataloader = dict(\n batch_size=32,\n num_workers=2,\n persistent_workers=True,\n drop_last=False,\n sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n dataset=dict(\n type=dataset_type,\n data_root=data_root,\n data_mode=data_mode,\n ann_file='annotations/coco_wholebody_val_v1.0.json',\n data_prefix=dict(img='val2017/'),\n test_mode=True,\n pipeline=val_pipeline,\n ))\ntest_dataloader = val_dataloader\n\nval_evaluator = [\n dict(type='PCKAccuracy', thr=0.2),\n dict(type='AUC'),\n dict(type='EPE')\n]\ntest_evaluator = val_evaluator\n", "path": "configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py"}], "after_files": [{"content": "_base_ = ['../../../_base_/default_runtime.py']\n\n# runtime\ntrain_cfg = dict(max_epochs=210, val_interval=10)\n\n# optimizer\noptim_wrapper = dict(optimizer=dict(\n type='Adam',\n lr=5e-4,\n))\n\n# learning policy\nparam_scheduler = [\n dict(\n type='LinearLR', begin=0, end=500, start_factor=0.001,\n by_epoch=False), # warm-up\n dict(\n type='MultiStepLR',\n begin=0,\n end=210,\n milestones=[170, 200],\n gamma=0.1,\n by_epoch=True)\n]\n\n# automatically scaling LR based on the actual training batch size\nauto_scale_lr = dict(base_batch_size=256)\n\n# hooks\ndefault_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater'))\n# codec settings\ncodec = dict(\n type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2)\n\n# model settings\nmodel = dict(\n type='TopdownPoseEstimator',\n data_preprocessor=dict(\n type='PoseDataPreprocessor',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n bgr_to_rgb=True),\n backbone=dict(\n type='MobileNetV2',\n widen_factor=1.,\n out_indices=(7, ),\n init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')),\n head=dict(\n type='HeatmapHead',\n in_channels=1280,\n out_channels=21,\n loss=dict(type='KeypointMSELoss', use_target_weight=True),\n decoder=codec),\n test_cfg=dict(\n flip_test=True,\n flip_mode='heatmap',\n shift_heatmap=True,\n ))\n\n# base dataset settings\ndataset_type = 'CocoWholeBodyHandDataset'\ndata_mode = 'topdown'\ndata_root = 'data/coco/'\n\n# pipelines\ntrain_pipeline = [\n dict(type='LoadImage', file_client_args={{_base_.file_client_args}}),\n dict(type='GetBBoxCenterScale'),\n dict(\n type='RandomBBoxTransform', rotate_factor=180,\n scale_factor=(0.7, 1.3)),\n dict(type='RandomFlip', direction='horizontal'),\n dict(type='TopdownAffine', input_size=codec['input_size']),\n dict(type='GenerateTarget', encoder=codec),\n dict(type='PackPoseInputs')\n]\nval_pipeline = [\n dict(type='LoadImage', file_client_args={{_base_.file_client_args}}),\n dict(type='GetBBoxCenterScale'),\n dict(type='TopdownAffine', input_size=codec['input_size']),\n dict(type='PackPoseInputs')\n]\n\n# data loaders\ntrain_dataloader = dict(\n batch_size=32,\n num_workers=2,\n persistent_workers=True,\n sampler=dict(type='DefaultSampler', shuffle=True),\n dataset=dict(\n type=dataset_type,\n data_root=data_root,\n data_mode=data_mode,\n ann_file='annotations/coco_wholebody_train_v1.0.json',\n data_prefix=dict(img='train2017/'),\n pipeline=train_pipeline,\n ))\nval_dataloader = dict(\n batch_size=32,\n num_workers=2,\n persistent_workers=True,\n drop_last=False,\n sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n dataset=dict(\n type=dataset_type,\n data_root=data_root,\n data_mode=data_mode,\n 
ann_file='annotations/coco_wholebody_val_v1.0.json',\n data_prefix=dict(img='val2017/'),\n test_mode=True,\n pipeline=val_pipeline,\n ))\ntest_dataloader = val_dataloader\n\nval_evaluator = [\n dict(type='PCKAccuracy', thr=0.2),\n dict(type='AUC'),\n dict(type='EPE')\n]\ntest_evaluator = val_evaluator\n", "path": "configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py"}]}
| 1,585 | 316 |
gh_patches_debug_28870
|
rasdani/github-patches
|
git_diff
|
arviz-devs__arviz-1435
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plot_compare issue with pWAIC
The plot_compare method seems to add the pWAIC values to the in-sample deviance to get WAIC values regardless of scale (deviance or log). Shouldn't the pWAIC be subtracted in the log scale, where a higher score is better? Otherwise, for example with two models m1 and m2 with the same in-sample deviance of 20: if m1 has pWAIC of 10, m2 has pWAIC of 5 then m1 WAIC is 30 and m2 WAIC is 25 so m1 is preferred. However, with the same in-sample deviance the model with the lower pWAIC should be preferred i.e. m2.
Example from my work:

I might be getting confused about this and my example isn't well explained, sorry.
--- END ISSUE ---
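The fix ultimately adopted (see the golden diff further down in this record) keys the in-sample correction on the reported scale instead of always subtracting twice the penalty. A minimal sketch of that logic, written as a standalone helper rather than ArviZ's actual plotting code:
```python
def insample_estimate(ic_value, p_ic, scale):
    """Apply the scale-dependent correction used when plotting in-sample values.

    Mirrors the correction in the patch below; `scale` is one of the
    information-criterion scales ArviZ reports.
    """
    if scale == "log":  # higher is better, so the penalty is added back
        correction = p_ic
    elif scale == "negative_log":  # lower is better
        correction = -p_ic
    elif scale == "deviance":  # lower is better, with the conventional factor of 2
        correction = -2 * p_ic
    else:
        raise ValueError(f"unknown scale: {scale}")
    return ic_value + correction
```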
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `arviz/plots/backends/bokeh/compareplot.py`
Content:
```
1 """Bokeh Compareplot."""
2 from bokeh.models import Span
3
4 from ...plot_utils import _scale_fig_size
5 from .. import show_layout
6 from . import backend_kwarg_defaults, create_axes_grid
7
8
9 def plot_compare(
10 ax,
11 comp_df,
12 figsize,
13 plot_ic_diff,
14 plot_standard_error,
15 insample_dev,
16 yticks_pos,
17 yticks_labels,
18 plot_kwargs,
19 textsize,
20 information_criterion,
21 step,
22 backend_kwargs,
23 show,
24 ):
25 """Bokeh compareplot."""
26 if backend_kwargs is None:
27 backend_kwargs = {}
28
29 backend_kwargs = {
30 **backend_kwarg_defaults(),
31 **backend_kwargs,
32 }
33
34 figsize, _, _, _, line_width, _ = _scale_fig_size(figsize, textsize, 1, 1)
35
36 if ax is None:
37 ax = create_axes_grid(
38 1,
39 figsize=figsize,
40 squeeze=True,
41 backend_kwargs=backend_kwargs,
42 )
43
44 yticks_pos = list(yticks_pos)
45
46 if plot_ic_diff:
47 yticks_labels[0] = comp_df.index[0]
48 yticks_labels[2::2] = comp_df.index[1:]
49
50 ax.yaxis.ticker = yticks_pos
51 ax.yaxis.major_label_overrides = {
52 dtype(key): value
53 for key, value in zip(yticks_pos, yticks_labels)
54 for dtype in (int, float)
55 if (dtype(key) - key == 0)
56 }
57
58 # create the coordinates for the errorbars
59 err_xs = []
60 err_ys = []
61
62 for x, y, xerr in zip(
63 comp_df[information_criterion].iloc[1:], yticks_pos[1::2], comp_df.dse[1:]
64 ):
65 err_xs.append((x - xerr, x + xerr))
66 err_ys.append((y, y))
67
68 # plot them
69 ax.triangle(
70 comp_df[information_criterion].iloc[1:],
71 yticks_pos[1::2],
72 line_color=plot_kwargs.get("color_dse", "grey"),
73 fill_color=plot_kwargs.get("color_dse", "grey"),
74 line_width=2,
75 size=6,
76 )
77 ax.multi_line(err_xs, err_ys, line_color=plot_kwargs.get("color_dse", "grey"))
78
79 else:
80 yticks_labels = comp_df.index
81 ax.yaxis.ticker = yticks_pos[::2]
82 ax.yaxis.major_label_overrides = {
83 key: value for key, value in zip(yticks_pos[::2], yticks_labels)
84 }
85
86 ax.circle(
87 comp_df[information_criterion],
88 yticks_pos[::2],
89 line_color=plot_kwargs.get("color_ic", "black"),
90 fill_color=None,
91 line_width=2,
92 size=6,
93 )
94
95 if plot_standard_error:
96 # create the coordinates for the errorbars
97 err_xs = []
98 err_ys = []
99
100 for x, y, xerr in zip(comp_df[information_criterion], yticks_pos[::2], comp_df.se):
101 err_xs.append((x - xerr, x + xerr))
102 err_ys.append((y, y))
103
104 # plot them
105 ax.multi_line(err_xs, err_ys, line_color=plot_kwargs.get("color_ic", "black"))
106
107 if insample_dev:
108 ax.circle(
109 comp_df[information_criterion] - (2 * comp_df["p_" + information_criterion]),
110 yticks_pos[::2],
111 line_color=plot_kwargs.get("color_insample_dev", "black"),
112 fill_color=plot_kwargs.get("color_insample_dev", "black"),
113 line_width=2,
114 size=6,
115 )
116
117 vline = Span(
118 location=comp_df[information_criterion].iloc[0],
119 dimension="height",
120 line_color=plot_kwargs.get("color_ls_min_ic", "grey"),
121 line_width=line_width,
122 line_dash=plot_kwargs.get("ls_min_ic", "dashed"),
123 )
124
125 ax.renderers.append(vline)
126
127 scale_col = information_criterion + "_scale"
128 if scale_col in comp_df:
129 scale = comp_df[scale_col].iloc[0].capitalize()
130 else:
131 scale = "Deviance"
132 ax.xaxis.axis_label = scale
133 ax.y_range._property_values["start"] = -1 + step # pylint: disable=protected-access
134 ax.y_range._property_values["end"] = 0 - step # pylint: disable=protected-access
135
136 show_layout(ax, show)
137
138 return ax
139
```
Path: `arviz/plots/backends/matplotlib/compareplot.py`
Content:
```
1 """Matplotlib Compareplot."""
2 import matplotlib.pyplot as plt
3
4 from ...plot_utils import _scale_fig_size
5 from . import backend_kwarg_defaults, backend_show, create_axes_grid
6
7
8 def plot_compare(
9 ax,
10 comp_df,
11 figsize,
12 plot_ic_diff,
13 plot_standard_error,
14 insample_dev,
15 yticks_pos,
16 yticks_labels,
17 plot_kwargs,
18 information_criterion,
19 textsize,
20 step,
21 backend_kwargs,
22 show,
23 ):
24 """Matplotlib compare plot."""
25 if backend_kwargs is None:
26 backend_kwargs = {}
27
28 backend_kwargs = {
29 **backend_kwarg_defaults(),
30 **backend_kwargs,
31 }
32
33 if figsize is None:
34 figsize = (6, len(comp_df))
35
36 figsize, ax_labelsize, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize, 1, 1)
37
38 backend_kwargs.setdefault("figsize", figsize)
39 backend_kwargs["squeeze"] = True
40
41 if ax is None:
42 _, ax = create_axes_grid(1, backend_kwargs=backend_kwargs)
43
44 if plot_ic_diff:
45 yticks_labels[0] = comp_df.index[0]
46 yticks_labels[2::2] = comp_df.index[1:]
47 ax.set_yticks(yticks_pos)
48 ax.errorbar(
49 x=comp_df[information_criterion].iloc[1:],
50 y=yticks_pos[1::2],
51 xerr=comp_df.dse[1:],
52 color=plot_kwargs.get("color_dse", "grey"),
53 fmt=plot_kwargs.get("marker_dse", "^"),
54 mew=linewidth,
55 elinewidth=linewidth,
56 )
57
58 else:
59 yticks_labels = comp_df.index
60 ax.set_yticks(yticks_pos[::2])
61
62 if plot_standard_error:
63 ax.errorbar(
64 x=comp_df[information_criterion],
65 y=yticks_pos[::2],
66 xerr=comp_df.se,
67 color=plot_kwargs.get("color_ic", "k"),
68 fmt=plot_kwargs.get("marker_ic", "o"),
69 mfc="None",
70 mew=linewidth,
71 lw=linewidth,
72 )
73 else:
74 ax.plot(
75 comp_df[information_criterion],
76 yticks_pos[::2],
77 color=plot_kwargs.get("color_ic", "k"),
78 marker=plot_kwargs.get("marker_ic", "o"),
79 mfc="None",
80 mew=linewidth,
81 lw=0,
82 )
83
84 if insample_dev:
85 ax.plot(
86 comp_df[information_criterion] - (2 * comp_df["p_" + information_criterion]),
87 yticks_pos[::2],
88 color=plot_kwargs.get("color_insample_dev", "k"),
89 marker=plot_kwargs.get("marker_insample_dev", "o"),
90 mew=linewidth,
91 lw=0,
92 )
93
94 ax.axvline(
95 comp_df[information_criterion].iloc[0],
96 ls=plot_kwargs.get("ls_min_ic", "--"),
97 color=plot_kwargs.get("color_ls_min_ic", "grey"),
98 lw=linewidth,
99 )
100
101 scale_col = information_criterion + "_scale"
102 if scale_col in comp_df:
103 scale = comp_df[scale_col].iloc[0].capitalize()
104 else:
105 scale = "Deviance"
106 ax.set_xlabel(scale, fontsize=ax_labelsize)
107 ax.set_yticklabels(yticks_labels)
108 ax.set_ylim(-1 + step, 0 - step)
109 ax.tick_params(labelsize=xt_labelsize)
110
111 if backend_show(show):
112 plt.show()
113
114 return ax
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/arviz/plots/backends/bokeh/compareplot.py b/arviz/plots/backends/bokeh/compareplot.py
--- a/arviz/plots/backends/bokeh/compareplot.py
+++ b/arviz/plots/backends/bokeh/compareplot.py
@@ -105,8 +105,16 @@
ax.multi_line(err_xs, err_ys, line_color=plot_kwargs.get("color_ic", "black"))
if insample_dev:
+ scale = comp_df[f"{information_criterion}_scale"][0]
+ p_ic = comp_df[f"p_{information_criterion}"]
+ if scale == "log":
+ correction = p_ic
+ elif scale == "negative_log":
+ correction = -p_ic
+ elif scale == "deviance":
+ correction = -(2 * p_ic)
ax.circle(
- comp_df[information_criterion] - (2 * comp_df["p_" + information_criterion]),
+ comp_df[information_criterion] + correction,
yticks_pos[::2],
line_color=plot_kwargs.get("color_insample_dev", "black"),
fill_color=plot_kwargs.get("color_insample_dev", "black"),
diff --git a/arviz/plots/backends/matplotlib/compareplot.py b/arviz/plots/backends/matplotlib/compareplot.py
--- a/arviz/plots/backends/matplotlib/compareplot.py
+++ b/arviz/plots/backends/matplotlib/compareplot.py
@@ -82,8 +82,16 @@
)
if insample_dev:
+ scale = comp_df[f"{information_criterion}_scale"][0]
+ p_ic = comp_df[f"p_{information_criterion}"]
+ if scale == "log":
+ correction = p_ic
+ elif scale == "negative_log":
+ correction = -p_ic
+ elif scale == "deviance":
+ correction = -(2 * p_ic)
ax.plot(
- comp_df[information_criterion] - (2 * comp_df["p_" + information_criterion]),
+ comp_df[information_criterion] + correction,
yticks_pos[::2],
color=plot_kwargs.get("color_insample_dev", "k"),
marker=plot_kwargs.get("marker_insample_dev", "o"),
|
{"golden_diff": "diff --git a/arviz/plots/backends/bokeh/compareplot.py b/arviz/plots/backends/bokeh/compareplot.py\n--- a/arviz/plots/backends/bokeh/compareplot.py\n+++ b/arviz/plots/backends/bokeh/compareplot.py\n@@ -105,8 +105,16 @@\n ax.multi_line(err_xs, err_ys, line_color=plot_kwargs.get(\"color_ic\", \"black\"))\n \n if insample_dev:\n+ scale = comp_df[f\"{information_criterion}_scale\"][0]\n+ p_ic = comp_df[f\"p_{information_criterion}\"]\n+ if scale == \"log\":\n+ correction = p_ic\n+ elif scale == \"negative_log\":\n+ correction = -p_ic\n+ elif scale == \"deviance\":\n+ correction = -(2 * p_ic)\n ax.circle(\n- comp_df[information_criterion] - (2 * comp_df[\"p_\" + information_criterion]),\n+ comp_df[information_criterion] + correction,\n yticks_pos[::2],\n line_color=plot_kwargs.get(\"color_insample_dev\", \"black\"),\n fill_color=plot_kwargs.get(\"color_insample_dev\", \"black\"),\ndiff --git a/arviz/plots/backends/matplotlib/compareplot.py b/arviz/plots/backends/matplotlib/compareplot.py\n--- a/arviz/plots/backends/matplotlib/compareplot.py\n+++ b/arviz/plots/backends/matplotlib/compareplot.py\n@@ -82,8 +82,16 @@\n )\n \n if insample_dev:\n+ scale = comp_df[f\"{information_criterion}_scale\"][0]\n+ p_ic = comp_df[f\"p_{information_criterion}\"]\n+ if scale == \"log\":\n+ correction = p_ic\n+ elif scale == \"negative_log\":\n+ correction = -p_ic\n+ elif scale == \"deviance\":\n+ correction = -(2 * p_ic)\n ax.plot(\n- comp_df[information_criterion] - (2 * comp_df[\"p_\" + information_criterion]),\n+ comp_df[information_criterion] + correction,\n yticks_pos[::2],\n color=plot_kwargs.get(\"color_insample_dev\", \"k\"),\n marker=plot_kwargs.get(\"marker_insample_dev\", \"o\"),\n", "issue": "plot_compare issue with pWAIC\nThe plot_compare method seems to add the pWAIC values to the in-sample deviance to get WAIC values regardless of scale (deviance or log). Shouldn't the pWAIC be subtracted in the log scale, where a higher score is better? Otherwise, for example with two models m1 and m2 with the same in-sample deviance of 20: if m1 has pWAIC of 10, m2 has pWAIC of 5 then m1 WAIC is 30 and m2 WAIC is 25 so m1 is preferred. However, with the same in-sample deviance the model with the lower pWAIC should be preferred i.e. m2.\r\n\r\nExample from my work:\r\n\r\n\r\n\r\nI might be getting confused about this and my example isn't well explained, sorry.\n", "before_files": [{"content": "\"\"\"Bokeh Compareplot.\"\"\"\nfrom bokeh.models import Span\n\nfrom ...plot_utils import _scale_fig_size\nfrom .. import show_layout\nfrom . 
import backend_kwarg_defaults, create_axes_grid\n\n\ndef plot_compare(\n ax,\n comp_df,\n figsize,\n plot_ic_diff,\n plot_standard_error,\n insample_dev,\n yticks_pos,\n yticks_labels,\n plot_kwargs,\n textsize,\n information_criterion,\n step,\n backend_kwargs,\n show,\n):\n \"\"\"Bokeh compareplot.\"\"\"\n if backend_kwargs is None:\n backend_kwargs = {}\n\n backend_kwargs = {\n **backend_kwarg_defaults(),\n **backend_kwargs,\n }\n\n figsize, _, _, _, line_width, _ = _scale_fig_size(figsize, textsize, 1, 1)\n\n if ax is None:\n ax = create_axes_grid(\n 1,\n figsize=figsize,\n squeeze=True,\n backend_kwargs=backend_kwargs,\n )\n\n yticks_pos = list(yticks_pos)\n\n if plot_ic_diff:\n yticks_labels[0] = comp_df.index[0]\n yticks_labels[2::2] = comp_df.index[1:]\n\n ax.yaxis.ticker = yticks_pos\n ax.yaxis.major_label_overrides = {\n dtype(key): value\n for key, value in zip(yticks_pos, yticks_labels)\n for dtype in (int, float)\n if (dtype(key) - key == 0)\n }\n\n # create the coordinates for the errorbars\n err_xs = []\n err_ys = []\n\n for x, y, xerr in zip(\n comp_df[information_criterion].iloc[1:], yticks_pos[1::2], comp_df.dse[1:]\n ):\n err_xs.append((x - xerr, x + xerr))\n err_ys.append((y, y))\n\n # plot them\n ax.triangle(\n comp_df[information_criterion].iloc[1:],\n yticks_pos[1::2],\n line_color=plot_kwargs.get(\"color_dse\", \"grey\"),\n fill_color=plot_kwargs.get(\"color_dse\", \"grey\"),\n line_width=2,\n size=6,\n )\n ax.multi_line(err_xs, err_ys, line_color=plot_kwargs.get(\"color_dse\", \"grey\"))\n\n else:\n yticks_labels = comp_df.index\n ax.yaxis.ticker = yticks_pos[::2]\n ax.yaxis.major_label_overrides = {\n key: value for key, value in zip(yticks_pos[::2], yticks_labels)\n }\n\n ax.circle(\n comp_df[information_criterion],\n yticks_pos[::2],\n line_color=plot_kwargs.get(\"color_ic\", \"black\"),\n fill_color=None,\n line_width=2,\n size=6,\n )\n\n if plot_standard_error:\n # create the coordinates for the errorbars\n err_xs = []\n err_ys = []\n\n for x, y, xerr in zip(comp_df[information_criterion], yticks_pos[::2], comp_df.se):\n err_xs.append((x - xerr, x + xerr))\n err_ys.append((y, y))\n\n # plot them\n ax.multi_line(err_xs, err_ys, line_color=plot_kwargs.get(\"color_ic\", \"black\"))\n\n if insample_dev:\n ax.circle(\n comp_df[information_criterion] - (2 * comp_df[\"p_\" + information_criterion]),\n yticks_pos[::2],\n line_color=plot_kwargs.get(\"color_insample_dev\", \"black\"),\n fill_color=plot_kwargs.get(\"color_insample_dev\", \"black\"),\n line_width=2,\n size=6,\n )\n\n vline = Span(\n location=comp_df[information_criterion].iloc[0],\n dimension=\"height\",\n line_color=plot_kwargs.get(\"color_ls_min_ic\", \"grey\"),\n line_width=line_width,\n line_dash=plot_kwargs.get(\"ls_min_ic\", \"dashed\"),\n )\n\n ax.renderers.append(vline)\n\n scale_col = information_criterion + \"_scale\"\n if scale_col in comp_df:\n scale = comp_df[scale_col].iloc[0].capitalize()\n else:\n scale = \"Deviance\"\n ax.xaxis.axis_label = scale\n ax.y_range._property_values[\"start\"] = -1 + step # pylint: disable=protected-access\n ax.y_range._property_values[\"end\"] = 0 - step # pylint: disable=protected-access\n\n show_layout(ax, show)\n\n return ax\n", "path": "arviz/plots/backends/bokeh/compareplot.py"}, {"content": "\"\"\"Matplotlib Compareplot.\"\"\"\nimport matplotlib.pyplot as plt\n\nfrom ...plot_utils import _scale_fig_size\nfrom . 
import backend_kwarg_defaults, backend_show, create_axes_grid\n\n\ndef plot_compare(\n ax,\n comp_df,\n figsize,\n plot_ic_diff,\n plot_standard_error,\n insample_dev,\n yticks_pos,\n yticks_labels,\n plot_kwargs,\n information_criterion,\n textsize,\n step,\n backend_kwargs,\n show,\n):\n \"\"\"Matplotlib compare plot.\"\"\"\n if backend_kwargs is None:\n backend_kwargs = {}\n\n backend_kwargs = {\n **backend_kwarg_defaults(),\n **backend_kwargs,\n }\n\n if figsize is None:\n figsize = (6, len(comp_df))\n\n figsize, ax_labelsize, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize, 1, 1)\n\n backend_kwargs.setdefault(\"figsize\", figsize)\n backend_kwargs[\"squeeze\"] = True\n\n if ax is None:\n _, ax = create_axes_grid(1, backend_kwargs=backend_kwargs)\n\n if plot_ic_diff:\n yticks_labels[0] = comp_df.index[0]\n yticks_labels[2::2] = comp_df.index[1:]\n ax.set_yticks(yticks_pos)\n ax.errorbar(\n x=comp_df[information_criterion].iloc[1:],\n y=yticks_pos[1::2],\n xerr=comp_df.dse[1:],\n color=plot_kwargs.get(\"color_dse\", \"grey\"),\n fmt=plot_kwargs.get(\"marker_dse\", \"^\"),\n mew=linewidth,\n elinewidth=linewidth,\n )\n\n else:\n yticks_labels = comp_df.index\n ax.set_yticks(yticks_pos[::2])\n\n if plot_standard_error:\n ax.errorbar(\n x=comp_df[information_criterion],\n y=yticks_pos[::2],\n xerr=comp_df.se,\n color=plot_kwargs.get(\"color_ic\", \"k\"),\n fmt=plot_kwargs.get(\"marker_ic\", \"o\"),\n mfc=\"None\",\n mew=linewidth,\n lw=linewidth,\n )\n else:\n ax.plot(\n comp_df[information_criterion],\n yticks_pos[::2],\n color=plot_kwargs.get(\"color_ic\", \"k\"),\n marker=plot_kwargs.get(\"marker_ic\", \"o\"),\n mfc=\"None\",\n mew=linewidth,\n lw=0,\n )\n\n if insample_dev:\n ax.plot(\n comp_df[information_criterion] - (2 * comp_df[\"p_\" + information_criterion]),\n yticks_pos[::2],\n color=plot_kwargs.get(\"color_insample_dev\", \"k\"),\n marker=plot_kwargs.get(\"marker_insample_dev\", \"o\"),\n mew=linewidth,\n lw=0,\n )\n\n ax.axvline(\n comp_df[information_criterion].iloc[0],\n ls=plot_kwargs.get(\"ls_min_ic\", \"--\"),\n color=plot_kwargs.get(\"color_ls_min_ic\", \"grey\"),\n lw=linewidth,\n )\n\n scale_col = information_criterion + \"_scale\"\n if scale_col in comp_df:\n scale = comp_df[scale_col].iloc[0].capitalize()\n else:\n scale = \"Deviance\"\n ax.set_xlabel(scale, fontsize=ax_labelsize)\n ax.set_yticklabels(yticks_labels)\n ax.set_ylim(-1 + step, 0 - step)\n ax.tick_params(labelsize=xt_labelsize)\n\n if backend_show(show):\n plt.show()\n\n return ax\n", "path": "arviz/plots/backends/matplotlib/compareplot.py"}], "after_files": [{"content": "\"\"\"Bokeh Compareplot.\"\"\"\nfrom bokeh.models import Span\n\nfrom ...plot_utils import _scale_fig_size\nfrom .. import show_layout\nfrom . 
import backend_kwarg_defaults, create_axes_grid\n\n\ndef plot_compare(\n ax,\n comp_df,\n figsize,\n plot_ic_diff,\n plot_standard_error,\n insample_dev,\n yticks_pos,\n yticks_labels,\n plot_kwargs,\n textsize,\n information_criterion,\n step,\n backend_kwargs,\n show,\n):\n \"\"\"Bokeh compareplot.\"\"\"\n if backend_kwargs is None:\n backend_kwargs = {}\n\n backend_kwargs = {\n **backend_kwarg_defaults(),\n **backend_kwargs,\n }\n\n figsize, _, _, _, line_width, _ = _scale_fig_size(figsize, textsize, 1, 1)\n\n if ax is None:\n ax = create_axes_grid(\n 1,\n figsize=figsize,\n squeeze=True,\n backend_kwargs=backend_kwargs,\n )\n\n yticks_pos = list(yticks_pos)\n\n if plot_ic_diff:\n yticks_labels[0] = comp_df.index[0]\n yticks_labels[2::2] = comp_df.index[1:]\n\n ax.yaxis.ticker = yticks_pos\n ax.yaxis.major_label_overrides = {\n dtype(key): value\n for key, value in zip(yticks_pos, yticks_labels)\n for dtype in (int, float)\n if (dtype(key) - key == 0)\n }\n\n # create the coordinates for the errorbars\n err_xs = []\n err_ys = []\n\n for x, y, xerr in zip(\n comp_df[information_criterion].iloc[1:], yticks_pos[1::2], comp_df.dse[1:]\n ):\n err_xs.append((x - xerr, x + xerr))\n err_ys.append((y, y))\n\n # plot them\n ax.triangle(\n comp_df[information_criterion].iloc[1:],\n yticks_pos[1::2],\n line_color=plot_kwargs.get(\"color_dse\", \"grey\"),\n fill_color=plot_kwargs.get(\"color_dse\", \"grey\"),\n line_width=2,\n size=6,\n )\n ax.multi_line(err_xs, err_ys, line_color=plot_kwargs.get(\"color_dse\", \"grey\"))\n\n else:\n yticks_labels = comp_df.index\n ax.yaxis.ticker = yticks_pos[::2]\n ax.yaxis.major_label_overrides = {\n key: value for key, value in zip(yticks_pos[::2], yticks_labels)\n }\n\n ax.circle(\n comp_df[information_criterion],\n yticks_pos[::2],\n line_color=plot_kwargs.get(\"color_ic\", \"black\"),\n fill_color=None,\n line_width=2,\n size=6,\n )\n\n if plot_standard_error:\n # create the coordinates for the errorbars\n err_xs = []\n err_ys = []\n\n for x, y, xerr in zip(comp_df[information_criterion], yticks_pos[::2], comp_df.se):\n err_xs.append((x - xerr, x + xerr))\n err_ys.append((y, y))\n\n # plot them\n ax.multi_line(err_xs, err_ys, line_color=plot_kwargs.get(\"color_ic\", \"black\"))\n\n if insample_dev:\n scale = comp_df[f\"{information_criterion}_scale\"][0]\n p_ic = comp_df[f\"p_{information_criterion}\"]\n if scale == \"log\":\n correction = p_ic\n elif scale == \"negative_log\":\n correction = -p_ic\n elif scale == \"deviance\":\n correction = -(2 * p_ic)\n ax.circle(\n comp_df[information_criterion] + correction,\n yticks_pos[::2],\n line_color=plot_kwargs.get(\"color_insample_dev\", \"black\"),\n fill_color=plot_kwargs.get(\"color_insample_dev\", \"black\"),\n line_width=2,\n size=6,\n )\n\n vline = Span(\n location=comp_df[information_criterion].iloc[0],\n dimension=\"height\",\n line_color=plot_kwargs.get(\"color_ls_min_ic\", \"grey\"),\n line_width=line_width,\n line_dash=plot_kwargs.get(\"ls_min_ic\", \"dashed\"),\n )\n\n ax.renderers.append(vline)\n\n scale_col = information_criterion + \"_scale\"\n if scale_col in comp_df:\n scale = comp_df[scale_col].iloc[0].capitalize()\n else:\n scale = \"Deviance\"\n ax.xaxis.axis_label = scale\n ax.y_range._property_values[\"start\"] = -1 + step # pylint: disable=protected-access\n ax.y_range._property_values[\"end\"] = 0 - step # pylint: disable=protected-access\n\n show_layout(ax, show)\n\n return ax\n", "path": "arviz/plots/backends/bokeh/compareplot.py"}, {"content": "\"\"\"Matplotlib 
Compareplot.\"\"\"\nimport matplotlib.pyplot as plt\n\nfrom ...plot_utils import _scale_fig_size\nfrom . import backend_kwarg_defaults, backend_show, create_axes_grid\n\n\ndef plot_compare(\n ax,\n comp_df,\n figsize,\n plot_ic_diff,\n plot_standard_error,\n insample_dev,\n yticks_pos,\n yticks_labels,\n plot_kwargs,\n information_criterion,\n textsize,\n step,\n backend_kwargs,\n show,\n):\n \"\"\"Matplotlib compare plot.\"\"\"\n if backend_kwargs is None:\n backend_kwargs = {}\n\n backend_kwargs = {\n **backend_kwarg_defaults(),\n **backend_kwargs,\n }\n\n if figsize is None:\n figsize = (6, len(comp_df))\n\n figsize, ax_labelsize, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize, 1, 1)\n\n backend_kwargs.setdefault(\"figsize\", figsize)\n backend_kwargs[\"squeeze\"] = True\n\n if ax is None:\n _, ax = create_axes_grid(1, backend_kwargs=backend_kwargs)\n\n if plot_ic_diff:\n yticks_labels[0] = comp_df.index[0]\n yticks_labels[2::2] = comp_df.index[1:]\n ax.set_yticks(yticks_pos)\n ax.errorbar(\n x=comp_df[information_criterion].iloc[1:],\n y=yticks_pos[1::2],\n xerr=comp_df.dse[1:],\n color=plot_kwargs.get(\"color_dse\", \"grey\"),\n fmt=plot_kwargs.get(\"marker_dse\", \"^\"),\n mew=linewidth,\n elinewidth=linewidth,\n )\n\n else:\n yticks_labels = comp_df.index\n ax.set_yticks(yticks_pos[::2])\n\n if plot_standard_error:\n ax.errorbar(\n x=comp_df[information_criterion],\n y=yticks_pos[::2],\n xerr=comp_df.se,\n color=plot_kwargs.get(\"color_ic\", \"k\"),\n fmt=plot_kwargs.get(\"marker_ic\", \"o\"),\n mfc=\"None\",\n mew=linewidth,\n lw=linewidth,\n )\n else:\n ax.plot(\n comp_df[information_criterion],\n yticks_pos[::2],\n color=plot_kwargs.get(\"color_ic\", \"k\"),\n marker=plot_kwargs.get(\"marker_ic\", \"o\"),\n mfc=\"None\",\n mew=linewidth,\n lw=0,\n )\n\n if insample_dev:\n scale = comp_df[f\"{information_criterion}_scale\"][0]\n p_ic = comp_df[f\"p_{information_criterion}\"]\n if scale == \"log\":\n correction = p_ic\n elif scale == \"negative_log\":\n correction = -p_ic\n elif scale == \"deviance\":\n correction = -(2 * p_ic)\n ax.plot(\n comp_df[information_criterion] + correction,\n yticks_pos[::2],\n color=plot_kwargs.get(\"color_insample_dev\", \"k\"),\n marker=plot_kwargs.get(\"marker_insample_dev\", \"o\"),\n mew=linewidth,\n lw=0,\n )\n\n ax.axvline(\n comp_df[information_criterion].iloc[0],\n ls=plot_kwargs.get(\"ls_min_ic\", \"--\"),\n color=plot_kwargs.get(\"color_ls_min_ic\", \"grey\"),\n lw=linewidth,\n )\n\n scale_col = information_criterion + \"_scale\"\n if scale_col in comp_df:\n scale = comp_df[scale_col].iloc[0].capitalize()\n else:\n scale = \"Deviance\"\n ax.set_xlabel(scale, fontsize=ax_labelsize)\n ax.set_yticklabels(yticks_labels)\n ax.set_ylim(-1 + step, 0 - step)\n ax.tick_params(labelsize=xt_labelsize)\n\n if backend_show(show):\n plt.show()\n\n return ax\n", "path": "arviz/plots/backends/matplotlib/compareplot.py"}]}
| 2,878 | 514 |
gh_patches_debug_40016
|
rasdani/github-patches
|
git_diff
|
pantsbuild__pants-15907
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adjust plugin thread-local APIs to account for `WorkunitStore` state
When `eager_fetch=False`, it's possible that a workunit's "artifacts" contain `Digest`s which haven't actually been fetched. When that's the case for a `Digest`, and a `StreamingWorkunit` plugin is using any of the [context methods which fetch files](https://github.com/pantsbuild/pants/blob/1d8205538a2932badcc1738fb1288600908b01e1/src/python/pants/engine/streaming_workunit_handler.py#L55-L69) from a background thread, they will encounter a:
> A WorkunitStore has not been set for this thread.
...error. That's because our existing `native_engine.stdio_thread_set_destination` statics only set the thread local `stdio` state, and not also our workunit state.
----
To fix this, we should adjust the existing method to additionally set the workunit store. But we should also deprecate the existing method and add a new one with a more accurate name (replacing #12295).
--- END ISSUE ---
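As a purely conceptual illustration (none of the names below are Pants APIs), the underlying problem is the familiar one of copying per-thread state into a worker thread before it runs user callbacks: if only part of that state is propagated, calls that depend on the missing part fail.
```python
# Conceptual sketch only -- none of these names are Pants APIs. It shows why a
# background thread must receive *all* relevant thread-local state (the stdio
# destination AND the workunit store), not just stdio, before invoking plugins.
import threading

_thread_state = threading.local()


def set_thread_state(stdio_destination, workunit_store):
    _thread_state.stdio_destination = stdio_destination
    _thread_state.workunit_store = workunit_store


def run_callback_in_background(callback, stdio_destination, workunit_store):
    def target():
        # If only the stdio destination were copied here, any call that records
        # or looks up workunits would fail with
        # "A WorkunitStore has not been set for this thread."
        set_thread_state(stdio_destination, workunit_store)
        callback()

    thread = threading.Thread(target=target, daemon=True)
    thread.start()
    return thread
```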
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/engine/streaming_workunit_handler.py`
Content:
```
1 # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 import logging
7 import threading
8 from abc import ABC, abstractmethod
9 from dataclasses import dataclass
10 from typing import Any, Callable, Iterable, Sequence, Tuple
11
12 from pants.base.specs import Specs
13 from pants.engine.addresses import Addresses
14 from pants.engine.fs import Digest, DigestContents, FileDigest, Snapshot
15 from pants.engine.internals import native_engine
16 from pants.engine.internals.scheduler import SchedulerSession, Workunit
17 from pants.engine.internals.selectors import Params
18 from pants.engine.rules import Get, MultiGet, QueryRule, collect_rules, rule
19 from pants.engine.target import Targets
20 from pants.engine.unions import UnionMembership, union
21 from pants.goal.run_tracker import RunTracker
22 from pants.option.options_bootstrapper import OptionsBootstrapper
23 from pants.util.logging import LogLevel
24
25 logger = logging.getLogger(__name__)
26
27
28 # -----------------------------------------------------------------------------------------------
29 # Streaming workunits plugin API
30 # -----------------------------------------------------------------------------------------------
31
32
33 @dataclass(frozen=True)
34 class TargetInfo:
35 filename: str
36
37
38 @dataclass(frozen=True)
39 class ExpandedSpecs:
40 targets: dict[str, list[TargetInfo]]
41
42
43 @dataclass(frozen=True)
44 class StreamingWorkunitContext:
45 _scheduler: SchedulerSession
46 _run_tracker: RunTracker
47 _specs: Specs
48 _options_bootstrapper: OptionsBootstrapper
49
50 @property
51 def run_tracker(self) -> RunTracker:
52 """Returns the RunTracker for the current run of Pants."""
53 return self._run_tracker
54
55 def single_file_digests_to_bytes(self, digests: Sequence[FileDigest]) -> list[bytes]:
56 """Return `bytes` for each `FileDigest`."""
57 return self._scheduler.single_file_digests_to_bytes(digests)
58
59 def snapshots_to_file_contents(
60 self, snapshots: Sequence[Snapshot]
61 ) -> tuple[DigestContents, ...]:
62 """Given a sequence of Snapshot objects, return a tuple of DigestContents representing the
63 files contained in those `Snapshot`s in sequence."""
64 return self._scheduler.snapshots_to_file_contents(snapshots)
65
66 def ensure_remote_has_recursive(self, digests: Sequence[Digest | FileDigest]) -> None:
67 """Invoke the internal ensure_remote_has_recursive function, which ensures that a remote
68 ByteStore, if it exists, has a copy of the files fingerprinted by each Digest."""
69 return self._scheduler.ensure_remote_has_recursive(digests)
70
71 def get_metrics(self) -> dict[str, int]:
72 """Invoke the internal get_metrics function, which returns metrics for the Session."""
73 return self._scheduler.get_metrics()
74
75 def get_observation_histograms(self) -> dict[str, Any]:
76 """Invoke the internal get_observation_histograms function, which serializes histograms
77 generated from Pants-internal observation metrics observed during the current run of Pants.
78
79 These metrics are useful for debugging Pants internals.
80 """
81 return self._scheduler.get_observation_histograms()
82
83 def get_expanded_specs(self) -> ExpandedSpecs:
84 """Return a dict containing the canonicalized addresses of the specs for this run, and what
85 files they expand to."""
86
87 (unexpanded_addresses,) = self._scheduler.product_request(
88 Addresses, [Params(self._specs, self._options_bootstrapper)]
89 )
90
91 expanded_targets = self._scheduler.product_request(
92 Targets, [Params(Addresses([addr])) for addr in unexpanded_addresses]
93 )
94 targets_dict: dict[str, list[TargetInfo]] = {}
95 for addr, targets in zip(unexpanded_addresses, expanded_targets):
96 targets_dict[addr.spec] = [
97 TargetInfo(
98 filename=(
99 tgt.address.filename if tgt.address.is_file_target else str(tgt.address)
100 )
101 )
102 for tgt in targets
103 ]
104 return ExpandedSpecs(targets=targets_dict)
105
106
107 class WorkunitsCallback(ABC):
108 @abstractmethod
109 def __call__(
110 self,
111 *,
112 started_workunits: tuple[Workunit, ...],
113 completed_workunits: tuple[Workunit, ...],
114 finished: bool,
115 context: StreamingWorkunitContext,
116 ) -> None:
117 """
118 :started_workunits: Workunits that have started but not completed.
119 :completed_workunits: Workunits that have completed.
120 :finished: True when the last chunk of workunit data is reported to the callback.
121 :context: A context providing access to functionality relevant to the run.
122 """
123
124 @property
125 @abstractmethod
126 def can_finish_async(self) -> bool:
127 """Can this callback finish its work in the background after the Pants run has already
128 completed?
129
130 The main reason to `return False` is if your callback logs in its final call, when
131 `finished=True`, as it may end up logging to `.pantsd.d/pants.log` instead of the console,
132 which is harder for users to find. Otherwise, most callbacks should return `True` to avoid
133 slowing down Pants from finishing the run.
134 """
135
136
137 @dataclass(frozen=True)
138 class WorkunitsCallbackFactory:
139 """A wrapper around a callable that constructs WorkunitsCallbacks.
140
141 NB: This extra wrapping is because subtyping is not supported in the return position of a
142 rule. See #11354 for discussion of that limitation.
143 """
144
145 callback_factory: Callable[[], WorkunitsCallback | None]
146
147
148 class WorkunitsCallbackFactories(Tuple[WorkunitsCallbackFactory, ...]):
149 """A list of registered factories for WorkunitsCallback instances."""
150
151
152 @union
153 class WorkunitsCallbackFactoryRequest:
154 """A request for a particular WorkunitsCallbackFactory."""
155
156
157 @rule
158 async def construct_workunits_callback_factories(
159 union_membership: UnionMembership,
160 ) -> WorkunitsCallbackFactories:
161 request_types = union_membership.get(WorkunitsCallbackFactoryRequest)
162 workunit_callback_factories = await MultiGet(
163 Get(WorkunitsCallbackFactory, WorkunitsCallbackFactoryRequest, request_type())
164 for request_type in request_types
165 )
166 return WorkunitsCallbackFactories(workunit_callback_factories)
167
168
169 # -----------------------------------------------------------------------------------------------
170 # Streaming workunits handler
171 # -----------------------------------------------------------------------------------------------
172
173
174 class StreamingWorkunitHandler:
175 """Periodically calls each registered WorkunitsCallback in a dedicated thread.
176
177 This class should be used as a context manager.
178 """
179
180 def __init__(
181 self,
182 scheduler: SchedulerSession,
183 run_tracker: RunTracker,
184 callbacks: Iterable[WorkunitsCallback],
185 options_bootstrapper: OptionsBootstrapper,
186 specs: Specs,
187 report_interval_seconds: float,
188 allow_async_completion: bool,
189 max_workunit_verbosity: LogLevel,
190 ) -> None:
191 scheduler = scheduler.isolated_shallow_clone("streaming_workunit_handler_session")
192 self.callbacks = callbacks
193 self.context = StreamingWorkunitContext(
194 _scheduler=scheduler,
195 _run_tracker=run_tracker,
196 _specs=specs,
197 _options_bootstrapper=options_bootstrapper,
198 )
199 self.thread_runner = (
200 _InnerHandler(
201 scheduler=scheduler,
202 context=self.context,
203 callbacks=self.callbacks,
204 report_interval=report_interval_seconds,
205 # TODO(10092) The max verbosity should be a per-client setting, rather than a global
206 # setting.
207 max_workunit_verbosity=max_workunit_verbosity,
208 allow_async_completion=allow_async_completion,
209 )
210 if callbacks
211 else None
212 )
213
214 def __enter__(self) -> None:
215 if not self.thread_runner:
216 return
217 self.thread_runner.start()
218
219 def __exit__(self, exc_type, exc_value, traceback) -> None:
220 if not self.thread_runner:
221 return
222 self.thread_runner.end()
223 if exc_type is not None:
224 self.thread_runner.join()
225
226
227 class _InnerHandler(threading.Thread):
228 def __init__(
229 self,
230 scheduler: Any,
231 context: StreamingWorkunitContext,
232 callbacks: Iterable[WorkunitsCallback],
233 report_interval: float,
234 max_workunit_verbosity: LogLevel,
235 allow_async_completion: bool,
236 ) -> None:
237 super().__init__(daemon=True)
238 self.scheduler = scheduler
239 self.context = context
240 self.stop_request = threading.Event()
241 self.report_interval = report_interval
242 self.callbacks = callbacks
243 self.max_workunit_verbosity = max_workunit_verbosity
244 # TODO: Have a thread per callback so that some callbacks can always finish async even
245 # if others must be finished synchronously.
246 self.block_until_complete = not allow_async_completion or any(
247 callback.can_finish_async is False for callback in self.callbacks
248 )
249 # Get the parent thread's logging destination. Note that this thread has not yet started
250 # as we are only in the constructor.
251 self.logging_destination = native_engine.stdio_thread_get_destination()
252
253 def poll_workunits(self, *, finished: bool) -> None:
254 workunits = self.scheduler.poll_workunits(self.max_workunit_verbosity)
255 for callback in self.callbacks:
256 callback(
257 started_workunits=workunits["started"],
258 completed_workunits=workunits["completed"],
259 finished=finished,
260 context=self.context,
261 )
262
263 def run(self) -> None:
264 # First, set the thread's logging destination to the parent thread's, meaning the console.
265 native_engine.stdio_thread_set_destination(self.logging_destination)
266 while not self.stop_request.isSet(): # type: ignore[attr-defined]
267 self.poll_workunits(finished=False)
268 self.stop_request.wait(timeout=self.report_interval)
269 else:
270 # Make one final call. Note that this may run after the Pants run has already
271 # completed, depending on whether the thread was joined or not.
272 self.poll_workunits(finished=True)
273
274 def end(self) -> None:
275 self.stop_request.set()
276 if self.block_until_complete:
277 logger.debug(
278 "Async completion is disabled: waiting for workunit callbacks to complete..."
279 )
280 super().join()
281 else:
282 logger.debug(
283 "Async completion is enabled: workunit callbacks will complete in the background."
284 )
285
286
287 def rules():
288 return [
289 QueryRule(WorkunitsCallbackFactories, (UnionMembership,)),
290 QueryRule(Targets, (Addresses,)),
291 QueryRule(Addresses, (Specs, OptionsBootstrapper)),
292 *collect_rules(),
293 ]
294
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/python/pants/engine/streaming_workunit_handler.py b/src/python/pants/engine/streaming_workunit_handler.py
--- a/src/python/pants/engine/streaming_workunit_handler.py
+++ b/src/python/pants/engine/streaming_workunit_handler.py
@@ -12,7 +12,7 @@
from pants.base.specs import Specs
from pants.engine.addresses import Addresses
from pants.engine.fs import Digest, DigestContents, FileDigest, Snapshot
-from pants.engine.internals import native_engine
+from pants.engine.internals.native_engine import PyThreadLocals
from pants.engine.internals.scheduler import SchedulerSession, Workunit
from pants.engine.internals.selectors import Params
from pants.engine.rules import Get, MultiGet, QueryRule, collect_rules, rule
@@ -30,6 +30,24 @@
# -----------------------------------------------------------------------------------------------
+def thread_locals_get_for_current_thread() -> PyThreadLocals:
+ """Gets the engine's thread local state for the current thread.
+
+ In order to safely use StreamingWorkunitContext methods from additional threads,
+ StreamingWorkunit plugins should propagate thread local state from the threads that they are
+ initialized on to any additional threads that they spawn.
+ """
+ return PyThreadLocals.get_for_current_thread()
+
+
+def thread_locals_set_for_current_thread(thread_locals: PyThreadLocals) -> None:
+ """Sets the engine's thread local state for the current thread.
+
+ See `thread_locals_get`.
+ """
+ thread_locals.set_for_current_thread()
+
+
@dataclass(frozen=True)
class TargetInfo:
filename: str
@@ -246,9 +264,9 @@
self.block_until_complete = not allow_async_completion or any(
callback.can_finish_async is False for callback in self.callbacks
)
- # Get the parent thread's logging destination. Note that this thread has not yet started
+ # Get the parent thread's thread locals. Note that this thread has not yet started
# as we are only in the constructor.
- self.logging_destination = native_engine.stdio_thread_get_destination()
+ self.thread_locals = PyThreadLocals.get_for_current_thread()
def poll_workunits(self, *, finished: bool) -> None:
workunits = self.scheduler.poll_workunits(self.max_workunit_verbosity)
@@ -261,8 +279,9 @@
)
def run(self) -> None:
- # First, set the thread's logging destination to the parent thread's, meaning the console.
- native_engine.stdio_thread_set_destination(self.logging_destination)
+ # First, set the thread's thread locals to the parent thread's in order to propagate the
+ # console, workunit stores, etc.
+ self.thread_locals.set_for_current_thread()
while not self.stop_request.isSet(): # type: ignore[attr-defined]
self.poll_workunits(finished=False)
self.stop_request.wait(timeout=self.report_interval)
|
{"golden_diff": "diff --git a/src/python/pants/engine/streaming_workunit_handler.py b/src/python/pants/engine/streaming_workunit_handler.py\n--- a/src/python/pants/engine/streaming_workunit_handler.py\n+++ b/src/python/pants/engine/streaming_workunit_handler.py\n@@ -12,7 +12,7 @@\n from pants.base.specs import Specs\n from pants.engine.addresses import Addresses\n from pants.engine.fs import Digest, DigestContents, FileDigest, Snapshot\n-from pants.engine.internals import native_engine\n+from pants.engine.internals.native_engine import PyThreadLocals\n from pants.engine.internals.scheduler import SchedulerSession, Workunit\n from pants.engine.internals.selectors import Params\n from pants.engine.rules import Get, MultiGet, QueryRule, collect_rules, rule\n@@ -30,6 +30,24 @@\n # -----------------------------------------------------------------------------------------------\n \n \n+def thread_locals_get_for_current_thread() -> PyThreadLocals:\n+ \"\"\"Gets the engine's thread local state for the current thread.\n+\n+ In order to safely use StreamingWorkunitContext methods from additional threads,\n+ StreamingWorkunit plugins should propagate thread local state from the threads that they are\n+ initialized on to any additional threads that they spawn.\n+ \"\"\"\n+ return PyThreadLocals.get_for_current_thread()\n+\n+\n+def thread_locals_set_for_current_thread(thread_locals: PyThreadLocals) -> None:\n+ \"\"\"Sets the engine's thread local state for the current thread.\n+\n+ See `thread_locals_get`.\n+ \"\"\"\n+ thread_locals.set_for_current_thread()\n+\n+\n @dataclass(frozen=True)\n class TargetInfo:\n filename: str\n@@ -246,9 +264,9 @@\n self.block_until_complete = not allow_async_completion or any(\n callback.can_finish_async is False for callback in self.callbacks\n )\n- # Get the parent thread's logging destination. Note that this thread has not yet started\n+ # Get the parent thread's thread locals. Note that this thread has not yet started\n # as we are only in the constructor.\n- self.logging_destination = native_engine.stdio_thread_get_destination()\n+ self.thread_locals = PyThreadLocals.get_for_current_thread()\n \n def poll_workunits(self, *, finished: bool) -> None:\n workunits = self.scheduler.poll_workunits(self.max_workunit_verbosity)\n@@ -261,8 +279,9 @@\n )\n \n def run(self) -> None:\n- # First, set the thread's logging destination to the parent thread's, meaning the console.\n- native_engine.stdio_thread_set_destination(self.logging_destination)\n+ # First, set the thread's thread locals to the parent thread's in order to propagate the\n+ # console, workunit stores, etc.\n+ self.thread_locals.set_for_current_thread()\n while not self.stop_request.isSet(): # type: ignore[attr-defined]\n self.poll_workunits(finished=False)\n self.stop_request.wait(timeout=self.report_interval)\n", "issue": "Adjust plugin thread-local APIs to account for `WorkunitStore` state\nWhen `eager_fetch=False`, it's possible that a workunit's \"artifacts\" contain `Digest`s which haven't actually been fetched. When that's the case for a `Digest`, and a `StreamingWorkunit` plugin is using any of the [context methods which fetch files](https://github.com/pantsbuild/pants/blob/1d8205538a2932badcc1738fb1288600908b01e1/src/python/pants/engine/streaming_workunit_handler.py#L55-L69) from a background thread, they will encounter a:\r\n> A WorkunitStore has not been set for this thread.\r\n\r\n...error. 
That's because our existing `native_engine.stdio_thread_set_destination` statics only set the thread local `stdio` state, and not also our workunit state.\r\n\r\n----\r\n\r\nTo fix this, we should adjust the existing method to additionally set the workunit store. But we should also deprecate the existing method and add a new one with a more accurate name (replacing #12295).\n", "before_files": [{"content": "# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport logging\nimport threading\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Iterable, Sequence, Tuple\n\nfrom pants.base.specs import Specs\nfrom pants.engine.addresses import Addresses\nfrom pants.engine.fs import Digest, DigestContents, FileDigest, Snapshot\nfrom pants.engine.internals import native_engine\nfrom pants.engine.internals.scheduler import SchedulerSession, Workunit\nfrom pants.engine.internals.selectors import Params\nfrom pants.engine.rules import Get, MultiGet, QueryRule, collect_rules, rule\nfrom pants.engine.target import Targets\nfrom pants.engine.unions import UnionMembership, union\nfrom pants.goal.run_tracker import RunTracker\nfrom pants.option.options_bootstrapper import OptionsBootstrapper\nfrom pants.util.logging import LogLevel\n\nlogger = logging.getLogger(__name__)\n\n\n# -----------------------------------------------------------------------------------------------\n# Streaming workunits plugin API\n# -----------------------------------------------------------------------------------------------\n\n\n@dataclass(frozen=True)\nclass TargetInfo:\n filename: str\n\n\n@dataclass(frozen=True)\nclass ExpandedSpecs:\n targets: dict[str, list[TargetInfo]]\n\n\n@dataclass(frozen=True)\nclass StreamingWorkunitContext:\n _scheduler: SchedulerSession\n _run_tracker: RunTracker\n _specs: Specs\n _options_bootstrapper: OptionsBootstrapper\n\n @property\n def run_tracker(self) -> RunTracker:\n \"\"\"Returns the RunTracker for the current run of Pants.\"\"\"\n return self._run_tracker\n\n def single_file_digests_to_bytes(self, digests: Sequence[FileDigest]) -> list[bytes]:\n \"\"\"Return `bytes` for each `FileDigest`.\"\"\"\n return self._scheduler.single_file_digests_to_bytes(digests)\n\n def snapshots_to_file_contents(\n self, snapshots: Sequence[Snapshot]\n ) -> tuple[DigestContents, ...]:\n \"\"\"Given a sequence of Snapshot objects, return a tuple of DigestContents representing the\n files contained in those `Snapshot`s in sequence.\"\"\"\n return self._scheduler.snapshots_to_file_contents(snapshots)\n\n def ensure_remote_has_recursive(self, digests: Sequence[Digest | FileDigest]) -> None:\n \"\"\"Invoke the internal ensure_remote_has_recursive function, which ensures that a remote\n ByteStore, if it exists, has a copy of the files fingerprinted by each Digest.\"\"\"\n return self._scheduler.ensure_remote_has_recursive(digests)\n\n def get_metrics(self) -> dict[str, int]:\n \"\"\"Invoke the internal get_metrics function, which returns metrics for the Session.\"\"\"\n return self._scheduler.get_metrics()\n\n def get_observation_histograms(self) -> dict[str, Any]:\n \"\"\"Invoke the internal get_observation_histograms function, which serializes histograms\n generated from Pants-internal observation metrics observed during the current run of Pants.\n\n These metrics are useful for debugging Pants internals.\n \"\"\"\n return 
self._scheduler.get_observation_histograms()\n\n def get_expanded_specs(self) -> ExpandedSpecs:\n \"\"\"Return a dict containing the canonicalized addresses of the specs for this run, and what\n files they expand to.\"\"\"\n\n (unexpanded_addresses,) = self._scheduler.product_request(\n Addresses, [Params(self._specs, self._options_bootstrapper)]\n )\n\n expanded_targets = self._scheduler.product_request(\n Targets, [Params(Addresses([addr])) for addr in unexpanded_addresses]\n )\n targets_dict: dict[str, list[TargetInfo]] = {}\n for addr, targets in zip(unexpanded_addresses, expanded_targets):\n targets_dict[addr.spec] = [\n TargetInfo(\n filename=(\n tgt.address.filename if tgt.address.is_file_target else str(tgt.address)\n )\n )\n for tgt in targets\n ]\n return ExpandedSpecs(targets=targets_dict)\n\n\nclass WorkunitsCallback(ABC):\n @abstractmethod\n def __call__(\n self,\n *,\n started_workunits: tuple[Workunit, ...],\n completed_workunits: tuple[Workunit, ...],\n finished: bool,\n context: StreamingWorkunitContext,\n ) -> None:\n \"\"\"\n :started_workunits: Workunits that have started but not completed.\n :completed_workunits: Workunits that have completed.\n :finished: True when the last chunk of workunit data is reported to the callback.\n :context: A context providing access to functionality relevant to the run.\n \"\"\"\n\n @property\n @abstractmethod\n def can_finish_async(self) -> bool:\n \"\"\"Can this callback finish its work in the background after the Pants run has already\n completed?\n\n The main reason to `return False` is if your callback logs in its final call, when\n `finished=True`, as it may end up logging to `.pantsd.d/pants.log` instead of the console,\n which is harder for users to find. Otherwise, most callbacks should return `True` to avoid\n slowing down Pants from finishing the run.\n \"\"\"\n\n\n@dataclass(frozen=True)\nclass WorkunitsCallbackFactory:\n \"\"\"A wrapper around a callable that constructs WorkunitsCallbacks.\n\n NB: This extra wrapping is because subtyping is not supported in the return position of a\n rule. 
See #11354 for discussion of that limitation.\n \"\"\"\n\n callback_factory: Callable[[], WorkunitsCallback | None]\n\n\nclass WorkunitsCallbackFactories(Tuple[WorkunitsCallbackFactory, ...]):\n \"\"\"A list of registered factories for WorkunitsCallback instances.\"\"\"\n\n\n@union\nclass WorkunitsCallbackFactoryRequest:\n \"\"\"A request for a particular WorkunitsCallbackFactory.\"\"\"\n\n\n@rule\nasync def construct_workunits_callback_factories(\n union_membership: UnionMembership,\n) -> WorkunitsCallbackFactories:\n request_types = union_membership.get(WorkunitsCallbackFactoryRequest)\n workunit_callback_factories = await MultiGet(\n Get(WorkunitsCallbackFactory, WorkunitsCallbackFactoryRequest, request_type())\n for request_type in request_types\n )\n return WorkunitsCallbackFactories(workunit_callback_factories)\n\n\n# -----------------------------------------------------------------------------------------------\n# Streaming workunits handler\n# -----------------------------------------------------------------------------------------------\n\n\nclass StreamingWorkunitHandler:\n \"\"\"Periodically calls each registered WorkunitsCallback in a dedicated thread.\n\n This class should be used as a context manager.\n \"\"\"\n\n def __init__(\n self,\n scheduler: SchedulerSession,\n run_tracker: RunTracker,\n callbacks: Iterable[WorkunitsCallback],\n options_bootstrapper: OptionsBootstrapper,\n specs: Specs,\n report_interval_seconds: float,\n allow_async_completion: bool,\n max_workunit_verbosity: LogLevel,\n ) -> None:\n scheduler = scheduler.isolated_shallow_clone(\"streaming_workunit_handler_session\")\n self.callbacks = callbacks\n self.context = StreamingWorkunitContext(\n _scheduler=scheduler,\n _run_tracker=run_tracker,\n _specs=specs,\n _options_bootstrapper=options_bootstrapper,\n )\n self.thread_runner = (\n _InnerHandler(\n scheduler=scheduler,\n context=self.context,\n callbacks=self.callbacks,\n report_interval=report_interval_seconds,\n # TODO(10092) The max verbosity should be a per-client setting, rather than a global\n # setting.\n max_workunit_verbosity=max_workunit_verbosity,\n allow_async_completion=allow_async_completion,\n )\n if callbacks\n else None\n )\n\n def __enter__(self) -> None:\n if not self.thread_runner:\n return\n self.thread_runner.start()\n\n def __exit__(self, exc_type, exc_value, traceback) -> None:\n if not self.thread_runner:\n return\n self.thread_runner.end()\n if exc_type is not None:\n self.thread_runner.join()\n\n\nclass _InnerHandler(threading.Thread):\n def __init__(\n self,\n scheduler: Any,\n context: StreamingWorkunitContext,\n callbacks: Iterable[WorkunitsCallback],\n report_interval: float,\n max_workunit_verbosity: LogLevel,\n allow_async_completion: bool,\n ) -> None:\n super().__init__(daemon=True)\n self.scheduler = scheduler\n self.context = context\n self.stop_request = threading.Event()\n self.report_interval = report_interval\n self.callbacks = callbacks\n self.max_workunit_verbosity = max_workunit_verbosity\n # TODO: Have a thread per callback so that some callbacks can always finish async even\n # if others must be finished synchronously.\n self.block_until_complete = not allow_async_completion or any(\n callback.can_finish_async is False for callback in self.callbacks\n )\n # Get the parent thread's logging destination. 
Note that this thread has not yet started\n # as we are only in the constructor.\n self.logging_destination = native_engine.stdio_thread_get_destination()\n\n def poll_workunits(self, *, finished: bool) -> None:\n workunits = self.scheduler.poll_workunits(self.max_workunit_verbosity)\n for callback in self.callbacks:\n callback(\n started_workunits=workunits[\"started\"],\n completed_workunits=workunits[\"completed\"],\n finished=finished,\n context=self.context,\n )\n\n def run(self) -> None:\n # First, set the thread's logging destination to the parent thread's, meaning the console.\n native_engine.stdio_thread_set_destination(self.logging_destination)\n while not self.stop_request.isSet(): # type: ignore[attr-defined]\n self.poll_workunits(finished=False)\n self.stop_request.wait(timeout=self.report_interval)\n else:\n # Make one final call. Note that this may run after the Pants run has already\n # completed, depending on whether the thread was joined or not.\n self.poll_workunits(finished=True)\n\n def end(self) -> None:\n self.stop_request.set()\n if self.block_until_complete:\n logger.debug(\n \"Async completion is disabled: waiting for workunit callbacks to complete...\"\n )\n super().join()\n else:\n logger.debug(\n \"Async completion is enabled: workunit callbacks will complete in the background.\"\n )\n\n\ndef rules():\n return [\n QueryRule(WorkunitsCallbackFactories, (UnionMembership,)),\n QueryRule(Targets, (Addresses,)),\n QueryRule(Addresses, (Specs, OptionsBootstrapper)),\n *collect_rules(),\n ]\n", "path": "src/python/pants/engine/streaming_workunit_handler.py"}], "after_files": [{"content": "# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport logging\nimport threading\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Iterable, Sequence, Tuple\n\nfrom pants.base.specs import Specs\nfrom pants.engine.addresses import Addresses\nfrom pants.engine.fs import Digest, DigestContents, FileDigest, Snapshot\nfrom pants.engine.internals.native_engine import PyThreadLocals\nfrom pants.engine.internals.scheduler import SchedulerSession, Workunit\nfrom pants.engine.internals.selectors import Params\nfrom pants.engine.rules import Get, MultiGet, QueryRule, collect_rules, rule\nfrom pants.engine.target import Targets\nfrom pants.engine.unions import UnionMembership, union\nfrom pants.goal.run_tracker import RunTracker\nfrom pants.option.options_bootstrapper import OptionsBootstrapper\nfrom pants.util.logging import LogLevel\n\nlogger = logging.getLogger(__name__)\n\n\n# -----------------------------------------------------------------------------------------------\n# Streaming workunits plugin API\n# -----------------------------------------------------------------------------------------------\n\n\ndef thread_locals_get_for_current_thread() -> PyThreadLocals:\n \"\"\"Gets the engine's thread local state for the current thread.\n\n In order to safely use StreamingWorkunitContext methods from additional threads,\n StreamingWorkunit plugins should propagate thread local state from the threads that they are\n initialized on to any additional threads that they spawn.\n \"\"\"\n return PyThreadLocals.get_for_current_thread()\n\n\ndef thread_locals_set_for_current_thread(thread_locals: PyThreadLocals) -> None:\n \"\"\"Sets the engine's thread local state for the current thread.\n\n See `thread_locals_get`.\n 
\"\"\"\n thread_locals.set_for_current_thread()\n\n\n@dataclass(frozen=True)\nclass TargetInfo:\n filename: str\n\n\n@dataclass(frozen=True)\nclass ExpandedSpecs:\n targets: dict[str, list[TargetInfo]]\n\n\n@dataclass(frozen=True)\nclass StreamingWorkunitContext:\n _scheduler: SchedulerSession\n _run_tracker: RunTracker\n _specs: Specs\n _options_bootstrapper: OptionsBootstrapper\n\n @property\n def run_tracker(self) -> RunTracker:\n \"\"\"Returns the RunTracker for the current run of Pants.\"\"\"\n return self._run_tracker\n\n def single_file_digests_to_bytes(self, digests: Sequence[FileDigest]) -> list[bytes]:\n \"\"\"Return `bytes` for each `FileDigest`.\"\"\"\n return self._scheduler.single_file_digests_to_bytes(digests)\n\n def snapshots_to_file_contents(\n self, snapshots: Sequence[Snapshot]\n ) -> tuple[DigestContents, ...]:\n \"\"\"Given a sequence of Snapshot objects, return a tuple of DigestContents representing the\n files contained in those `Snapshot`s in sequence.\"\"\"\n return self._scheduler.snapshots_to_file_contents(snapshots)\n\n def ensure_remote_has_recursive(self, digests: Sequence[Digest | FileDigest]) -> None:\n \"\"\"Invoke the internal ensure_remote_has_recursive function, which ensures that a remote\n ByteStore, if it exists, has a copy of the files fingerprinted by each Digest.\"\"\"\n return self._scheduler.ensure_remote_has_recursive(digests)\n\n def get_metrics(self) -> dict[str, int]:\n \"\"\"Invoke the internal get_metrics function, which returns metrics for the Session.\"\"\"\n return self._scheduler.get_metrics()\n\n def get_observation_histograms(self) -> dict[str, Any]:\n \"\"\"Invoke the internal get_observation_histograms function, which serializes histograms\n generated from Pants-internal observation metrics observed during the current run of Pants.\n\n These metrics are useful for debugging Pants internals.\n \"\"\"\n return self._scheduler.get_observation_histograms()\n\n def get_expanded_specs(self) -> ExpandedSpecs:\n \"\"\"Return a dict containing the canonicalized addresses of the specs for this run, and what\n files they expand to.\"\"\"\n\n (unexpanded_addresses,) = self._scheduler.product_request(\n Addresses, [Params(self._specs, self._options_bootstrapper)]\n )\n\n expanded_targets = self._scheduler.product_request(\n Targets, [Params(Addresses([addr])) for addr in unexpanded_addresses]\n )\n targets_dict: dict[str, list[TargetInfo]] = {}\n for addr, targets in zip(unexpanded_addresses, expanded_targets):\n targets_dict[addr.spec] = [\n TargetInfo(\n filename=(\n tgt.address.filename if tgt.address.is_file_target else str(tgt.address)\n )\n )\n for tgt in targets\n ]\n return ExpandedSpecs(targets=targets_dict)\n\n\nclass WorkunitsCallback(ABC):\n @abstractmethod\n def __call__(\n self,\n *,\n started_workunits: tuple[Workunit, ...],\n completed_workunits: tuple[Workunit, ...],\n finished: bool,\n context: StreamingWorkunitContext,\n ) -> None:\n \"\"\"\n :started_workunits: Workunits that have started but not completed.\n :completed_workunits: Workunits that have completed.\n :finished: True when the last chunk of workunit data is reported to the callback.\n :context: A context providing access to functionality relevant to the run.\n \"\"\"\n\n @property\n @abstractmethod\n def can_finish_async(self) -> bool:\n \"\"\"Can this callback finish its work in the background after the Pants run has already\n completed?\n\n The main reason to `return False` is if your callback logs in its final call, when\n `finished=True`, as it may end up 
logging to `.pantsd.d/pants.log` instead of the console,\n which is harder for users to find. Otherwise, most callbacks should return `True` to avoid\n slowing down Pants from finishing the run.\n \"\"\"\n\n\n@dataclass(frozen=True)\nclass WorkunitsCallbackFactory:\n \"\"\"A wrapper around a callable that constructs WorkunitsCallbacks.\n\n NB: This extra wrapping is because subtyping is not supported in the return position of a\n rule. See #11354 for discussion of that limitation.\n \"\"\"\n\n callback_factory: Callable[[], WorkunitsCallback | None]\n\n\nclass WorkunitsCallbackFactories(Tuple[WorkunitsCallbackFactory, ...]):\n \"\"\"A list of registered factories for WorkunitsCallback instances.\"\"\"\n\n\n@union\nclass WorkunitsCallbackFactoryRequest:\n \"\"\"A request for a particular WorkunitsCallbackFactory.\"\"\"\n\n\n@rule\nasync def construct_workunits_callback_factories(\n union_membership: UnionMembership,\n) -> WorkunitsCallbackFactories:\n request_types = union_membership.get(WorkunitsCallbackFactoryRequest)\n workunit_callback_factories = await MultiGet(\n Get(WorkunitsCallbackFactory, WorkunitsCallbackFactoryRequest, request_type())\n for request_type in request_types\n )\n return WorkunitsCallbackFactories(workunit_callback_factories)\n\n\n# -----------------------------------------------------------------------------------------------\n# Streaming workunits handler\n# -----------------------------------------------------------------------------------------------\n\n\nclass StreamingWorkunitHandler:\n \"\"\"Periodically calls each registered WorkunitsCallback in a dedicated thread.\n\n This class should be used as a context manager.\n \"\"\"\n\n def __init__(\n self,\n scheduler: SchedulerSession,\n run_tracker: RunTracker,\n callbacks: Iterable[WorkunitsCallback],\n options_bootstrapper: OptionsBootstrapper,\n specs: Specs,\n report_interval_seconds: float,\n allow_async_completion: bool,\n max_workunit_verbosity: LogLevel,\n ) -> None:\n scheduler = scheduler.isolated_shallow_clone(\"streaming_workunit_handler_session\")\n self.callbacks = callbacks\n self.context = StreamingWorkunitContext(\n _scheduler=scheduler,\n _run_tracker=run_tracker,\n _specs=specs,\n _options_bootstrapper=options_bootstrapper,\n )\n self.thread_runner = (\n _InnerHandler(\n scheduler=scheduler,\n context=self.context,\n callbacks=self.callbacks,\n report_interval=report_interval_seconds,\n # TODO(10092) The max verbosity should be a per-client setting, rather than a global\n # setting.\n max_workunit_verbosity=max_workunit_verbosity,\n allow_async_completion=allow_async_completion,\n )\n if callbacks\n else None\n )\n\n def __enter__(self) -> None:\n if not self.thread_runner:\n return\n self.thread_runner.start()\n\n def __exit__(self, exc_type, exc_value, traceback) -> None:\n if not self.thread_runner:\n return\n self.thread_runner.end()\n if exc_type is not None:\n self.thread_runner.join()\n\n\nclass _InnerHandler(threading.Thread):\n def __init__(\n self,\n scheduler: Any,\n context: StreamingWorkunitContext,\n callbacks: Iterable[WorkunitsCallback],\n report_interval: float,\n max_workunit_verbosity: LogLevel,\n allow_async_completion: bool,\n ) -> None:\n super().__init__(daemon=True)\n self.scheduler = scheduler\n self.context = context\n self.stop_request = threading.Event()\n self.report_interval = report_interval\n self.callbacks = callbacks\n self.max_workunit_verbosity = max_workunit_verbosity\n # TODO: Have a thread per callback so that some callbacks can always finish async even\n 
# if others must be finished synchronously.\n self.block_until_complete = not allow_async_completion or any(\n callback.can_finish_async is False for callback in self.callbacks\n )\n # Get the parent thread's thread locals. Note that this thread has not yet started\n # as we are only in the constructor.\n self.thread_locals = PyThreadLocals.get_for_current_thread()\n\n def poll_workunits(self, *, finished: bool) -> None:\n workunits = self.scheduler.poll_workunits(self.max_workunit_verbosity)\n for callback in self.callbacks:\n callback(\n started_workunits=workunits[\"started\"],\n completed_workunits=workunits[\"completed\"],\n finished=finished,\n context=self.context,\n )\n\n def run(self) -> None:\n # First, set the thread's thread locals to the parent thread's in order to propagate the\n # console, workunit stores, etc.\n self.thread_locals.set_for_current_thread()\n while not self.stop_request.isSet(): # type: ignore[attr-defined]\n self.poll_workunits(finished=False)\n self.stop_request.wait(timeout=self.report_interval)\n else:\n # Make one final call. Note that this may run after the Pants run has already\n # completed, depending on whether the thread was joined or not.\n self.poll_workunits(finished=True)\n\n def end(self) -> None:\n self.stop_request.set()\n if self.block_until_complete:\n logger.debug(\n \"Async completion is disabled: waiting for workunit callbacks to complete...\"\n )\n super().join()\n else:\n logger.debug(\n \"Async completion is enabled: workunit callbacks will complete in the background.\"\n )\n\n\ndef rules():\n return [\n QueryRule(WorkunitsCallbackFactories, (UnionMembership,)),\n QueryRule(Targets, (Addresses,)),\n QueryRule(Addresses, (Specs, OptionsBootstrapper)),\n *collect_rules(),\n ]\n", "path": "src/python/pants/engine/streaming_workunit_handler.py"}]}
| 3,524 | 639 |
gh_patches_debug_13649 | rasdani/github-patches | git_diff | iterative__dvc-4196 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
remove: remove dvc.yaml and dvc.lock if they are empty
https://github.com/iterative/dvc/pull/4074#issuecomment-648097445
```
$ cat dvc.lock
{}
$ cat dvc.yaml
stages: {}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/dvcfile.py`
Content:
```
1 import collections
2 import contextlib
3 import logging
4 import os
5
6 from voluptuous import MultipleInvalid
7
8 from dvc.exceptions import DvcException
9 from dvc.stage import serialize
10 from dvc.stage.exceptions import (
11 StageFileBadNameError,
12 StageFileDoesNotExistError,
13 StageFileFormatError,
14 StageFileIsNotDvcFileError,
15 )
16 from dvc.stage.loader import SingleStageLoader, StageLoader
17 from dvc.utils import relpath
18 from dvc.utils.collections import apply_diff
19 from dvc.utils.yaml import dump_yaml, parse_yaml, parse_yaml_for_update
20
21 logger = logging.getLogger(__name__)
22
23 DVC_FILE = "Dvcfile"
24 DVC_FILE_SUFFIX = ".dvc"
25 PIPELINE_FILE = "dvc.yaml"
26 PIPELINE_LOCK = "dvc.lock"
27
28
29 class LockfileCorruptedError(DvcException):
30 pass
31
32
33 def is_valid_filename(path):
34 return path.endswith(DVC_FILE_SUFFIX) or os.path.basename(path) in [
35 DVC_FILE,
36 PIPELINE_FILE,
37 ]
38
39
40 def is_dvc_file(path):
41 return os.path.isfile(path) and (
42 is_valid_filename(path) or os.path.basename(path) == PIPELINE_LOCK
43 )
44
45
46 def check_dvc_filename(path):
47 if not is_valid_filename(path):
48 raise StageFileBadNameError(
49 "bad DVC-file name '{}'. DVC-files should be named "
50 "'Dvcfile' or have a '.dvc' suffix (e.g. '{}.dvc').".format(
51 relpath(path), os.path.basename(path)
52 )
53 )
54
55
56 class FileMixin:
57 SCHEMA = None
58
59 def __init__(self, repo, path, **kwargs):
60 self.repo = repo
61 self.path = path
62
63 def __repr__(self):
64 return "{}: {}".format(
65 self.__class__.__name__, relpath(self.path, self.repo.root_dir)
66 )
67
68 def __hash__(self):
69 return hash(self.path)
70
71 def __eq__(self, other):
72 return self.repo == other.repo and os.path.abspath(
73 self.path
74 ) == os.path.abspath(other.path)
75
76 def __str__(self):
77 return f"{self.__class__.__name__}: {self.relpath}"
78
79 @property
80 def relpath(self):
81 return relpath(self.path)
82
83 def exists(self):
84 return self.repo.tree.exists(self.path)
85
86 def _load(self):
87 # it raises the proper exceptions by priority:
88 # 1. when the file doesn't exists
89 # 2. filename is not a DVC-file
90 # 3. path doesn't represent a regular file
91 if not self.exists():
92 raise StageFileDoesNotExistError(self.path)
93 check_dvc_filename(self.path)
94 if not self.repo.tree.isfile(self.path):
95 raise StageFileIsNotDvcFileError(self.path)
96
97 with self.repo.tree.open(self.path) as fd:
98 stage_text = fd.read()
99 d = parse_yaml(stage_text, self.path)
100 self.validate(d, self.relpath)
101 return d, stage_text
102
103 @classmethod
104 def validate(cls, d, fname=None):
105 assert isinstance(cls.SCHEMA, collections.abc.Callable)
106 try:
107 cls.SCHEMA(d) # pylint: disable=not-callable
108 except MultipleInvalid as exc:
109 raise StageFileFormatError(f"'{fname}' format error: {exc}")
110
111 def remove(self, force=False): # pylint: disable=unused-argument
112 with contextlib.suppress(FileNotFoundError):
113 os.unlink(self.path)
114
115 def dump(self, stage, **kwargs):
116 raise NotImplementedError
117
118
119 class SingleStageFile(FileMixin):
120 from dvc.schema import COMPILED_SINGLE_STAGE_SCHEMA as SCHEMA
121
122 @property
123 def stage(self):
124 data, raw = self._load()
125 return SingleStageLoader.load_stage(self, data, raw)
126
127 @property
128 def stages(self):
129 data, raw = self._load()
130 return SingleStageLoader(self, data, raw)
131
132 def dump(self, stage, **kwargs):
133 """Dumps given stage appropriately in the dvcfile."""
134 from dvc.stage import PipelineStage
135
136 assert not isinstance(stage, PipelineStage)
137 check_dvc_filename(self.path)
138 logger.debug(
139 "Saving information to '{file}'.".format(file=relpath(self.path))
140 )
141 dump_yaml(self.path, serialize.to_single_stage_file(stage))
142 self.repo.scm.track_file(self.relpath)
143
144 def remove_stage(self, stage): # pylint: disable=unused-argument
145 self.remove()
146
147
148 class PipelineFile(FileMixin):
149 """Abstraction for pipelines file, .yaml + .lock combined."""
150
151 from dvc.schema import COMPILED_MULTI_STAGE_SCHEMA as SCHEMA
152
153 @property
154 def _lockfile(self):
155 return Lockfile(self.repo, os.path.splitext(self.path)[0] + ".lock")
156
157 def dump(
158 self, stage, update_pipeline=False, no_lock=False, **kwargs
159 ): # pylint: disable=arguments-differ
160 """Dumps given stage appropriately in the dvcfile."""
161 from dvc.stage import PipelineStage
162
163 assert isinstance(stage, PipelineStage)
164 check_dvc_filename(self.path)
165
166 if update_pipeline and not stage.is_data_source:
167 self._dump_pipeline_file(stage)
168
169 if not no_lock:
170 self._dump_lockfile(stage)
171
172 def _dump_lockfile(self, stage):
173 self._lockfile.dump(stage)
174
175 def _dump_pipeline_file(self, stage):
176 data = {}
177 if self.exists():
178 with open(self.path) as fd:
179 data = parse_yaml_for_update(fd.read(), self.path)
180 else:
181 logger.info("Creating '%s'", self.relpath)
182 open(self.path, "w+").close()
183
184 data["stages"] = data.get("stages", {})
185 stage_data = serialize.to_pipeline_file(stage)
186 existing_entry = stage.name in data["stages"]
187
188 action = "Modifying" if existing_entry else "Adding"
189 logger.info("%s stage '%s' in '%s'", action, stage.name, self.relpath)
190
191 if existing_entry:
192 orig_stage_data = data["stages"][stage.name]
193 if "meta" in orig_stage_data:
194 stage_data[stage.name]["meta"] = orig_stage_data["meta"]
195 apply_diff(stage_data[stage.name], orig_stage_data)
196 else:
197 data["stages"].update(stage_data)
198
199 dump_yaml(self.path, data)
200 self.repo.scm.track_file(self.relpath)
201
202 @property
203 def stage(self):
204 raise DvcException(
205 "PipelineFile has multiple stages. Please specify it's name."
206 )
207
208 @property
209 def stages(self):
210 data, _ = self._load()
211 lockfile_data = self._lockfile.load()
212 return StageLoader(self, data.get("stages", {}), lockfile_data)
213
214 def remove(self, force=False):
215 if not force:
216 logger.warning("Cannot remove pipeline file.")
217 return
218
219 super().remove()
220 self._lockfile.remove()
221
222 def remove_stage(self, stage):
223 self._lockfile.remove_stage(stage)
224 if not self.exists():
225 return
226
227 with open(self.path, "r") as f:
228 d = parse_yaml_for_update(f.read(), self.path)
229
230 self.validate(d, self.path)
231 if stage.name not in d.get("stages", {}):
232 return
233
234 logger.debug("Removing '%s' from '%s'", stage.name, self.path)
235 del d["stages"][stage.name]
236 dump_yaml(self.path, d)
237
238
239 class Lockfile(FileMixin):
240 from dvc.schema import COMPILED_LOCKFILE_SCHEMA as SCHEMA
241
242 def load(self):
243 if not self.exists():
244 return {}
245 with self.repo.tree.open(self.path) as fd:
246 data = parse_yaml(fd.read(), self.path)
247 try:
248 self.validate(data, fname=self.relpath)
249 except StageFileFormatError:
250 raise LockfileCorruptedError(
251 f"Lockfile '{self.relpath}' is corrupted."
252 )
253 return data
254
255 def dump(self, stage, **kwargs):
256 stage_data = serialize.to_lockfile(stage)
257 if not self.exists():
258 modified = True
259 logger.info("Generating lock file '%s'", self.relpath)
260 data = stage_data
261 open(self.path, "w+").close()
262 else:
263 with self.repo.tree.open(self.path, "r") as fd:
264 data = parse_yaml_for_update(fd.read(), self.path)
265 modified = data.get(stage.name, {}) != stage_data.get(
266 stage.name, {}
267 )
268 if modified:
269 logger.info("Updating lock file '%s'", self.relpath)
270 data.update(stage_data)
271 dump_yaml(self.path, data)
272 if modified:
273 self.repo.scm.track_file(self.relpath)
274
275 def remove_stage(self, stage):
276 if not self.exists():
277 return
278
279 with open(self.path) as f:
280 d = parse_yaml_for_update(f.read(), self.path)
281 self.validate(d, self.path)
282
283 if stage.name not in d:
284 return
285
286 logger.debug("Removing '%s' from '%s'", stage.name, self.path)
287 del d[stage.name]
288
289 dump_yaml(self.path, d)
290
291
292 class Dvcfile:
293 def __new__(cls, repo, path, **kwargs):
294 assert path
295 assert repo
296
297 _, ext = os.path.splitext(path)
298 if ext in [".yaml", ".yml"]:
299 return PipelineFile(repo, path, **kwargs)
300 # fallback to single stage file for better error messages
301 return SingleStageFile(repo, path, **kwargs)
302
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dvc/dvcfile.py b/dvc/dvcfile.py
--- a/dvc/dvcfile.py
+++ b/dvc/dvcfile.py
@@ -233,7 +233,11 @@
logger.debug("Removing '%s' from '%s'", stage.name, self.path)
del d["stages"][stage.name]
- dump_yaml(self.path, d)
+
+ if d["stages"]:
+ dump_yaml(self.path, d)
+ else:
+ super().remove()
class Lockfile(FileMixin):
@@ -286,7 +290,10 @@
logger.debug("Removing '%s' from '%s'", stage.name, self.path)
del d[stage.name]
- dump_yaml(self.path, d)
+ if d:
+ dump_yaml(self.path, d)
+ else:
+ self.remove()
class Dvcfile:
|
{"golden_diff": "diff --git a/dvc/dvcfile.py b/dvc/dvcfile.py\n--- a/dvc/dvcfile.py\n+++ b/dvc/dvcfile.py\n@@ -233,7 +233,11 @@\n \n logger.debug(\"Removing '%s' from '%s'\", stage.name, self.path)\n del d[\"stages\"][stage.name]\n- dump_yaml(self.path, d)\n+\n+ if d[\"stages\"]:\n+ dump_yaml(self.path, d)\n+ else:\n+ super().remove()\n \n \n class Lockfile(FileMixin):\n@@ -286,7 +290,10 @@\n logger.debug(\"Removing '%s' from '%s'\", stage.name, self.path)\n del d[stage.name]\n \n- dump_yaml(self.path, d)\n+ if d:\n+ dump_yaml(self.path, d)\n+ else:\n+ self.remove()\n \n \n class Dvcfile:\n", "issue": "remove: remove dvc.yaml and dvc.lock if they are empty\nhttps://github.com/iterative/dvc/pull/4074#issuecomment-648097445\r\n\r\n```\r\n$ cat dvc.lock\r\n{} \r\n$ cat dvc.yaml\r\nstages: {} \r\n```\n", "before_files": [{"content": "import collections\nimport contextlib\nimport logging\nimport os\n\nfrom voluptuous import MultipleInvalid\n\nfrom dvc.exceptions import DvcException\nfrom dvc.stage import serialize\nfrom dvc.stage.exceptions import (\n StageFileBadNameError,\n StageFileDoesNotExistError,\n StageFileFormatError,\n StageFileIsNotDvcFileError,\n)\nfrom dvc.stage.loader import SingleStageLoader, StageLoader\nfrom dvc.utils import relpath\nfrom dvc.utils.collections import apply_diff\nfrom dvc.utils.yaml import dump_yaml, parse_yaml, parse_yaml_for_update\n\nlogger = logging.getLogger(__name__)\n\nDVC_FILE = \"Dvcfile\"\nDVC_FILE_SUFFIX = \".dvc\"\nPIPELINE_FILE = \"dvc.yaml\"\nPIPELINE_LOCK = \"dvc.lock\"\n\n\nclass LockfileCorruptedError(DvcException):\n pass\n\n\ndef is_valid_filename(path):\n return path.endswith(DVC_FILE_SUFFIX) or os.path.basename(path) in [\n DVC_FILE,\n PIPELINE_FILE,\n ]\n\n\ndef is_dvc_file(path):\n return os.path.isfile(path) and (\n is_valid_filename(path) or os.path.basename(path) == PIPELINE_LOCK\n )\n\n\ndef check_dvc_filename(path):\n if not is_valid_filename(path):\n raise StageFileBadNameError(\n \"bad DVC-file name '{}'. DVC-files should be named \"\n \"'Dvcfile' or have a '.dvc' suffix (e.g. '{}.dvc').\".format(\n relpath(path), os.path.basename(path)\n )\n )\n\n\nclass FileMixin:\n SCHEMA = None\n\n def __init__(self, repo, path, **kwargs):\n self.repo = repo\n self.path = path\n\n def __repr__(self):\n return \"{}: {}\".format(\n self.__class__.__name__, relpath(self.path, self.repo.root_dir)\n )\n\n def __hash__(self):\n return hash(self.path)\n\n def __eq__(self, other):\n return self.repo == other.repo and os.path.abspath(\n self.path\n ) == os.path.abspath(other.path)\n\n def __str__(self):\n return f\"{self.__class__.__name__}: {self.relpath}\"\n\n @property\n def relpath(self):\n return relpath(self.path)\n\n def exists(self):\n return self.repo.tree.exists(self.path)\n\n def _load(self):\n # it raises the proper exceptions by priority:\n # 1. when the file doesn't exists\n # 2. filename is not a DVC-file\n # 3. 
path doesn't represent a regular file\n if not self.exists():\n raise StageFileDoesNotExistError(self.path)\n check_dvc_filename(self.path)\n if not self.repo.tree.isfile(self.path):\n raise StageFileIsNotDvcFileError(self.path)\n\n with self.repo.tree.open(self.path) as fd:\n stage_text = fd.read()\n d = parse_yaml(stage_text, self.path)\n self.validate(d, self.relpath)\n return d, stage_text\n\n @classmethod\n def validate(cls, d, fname=None):\n assert isinstance(cls.SCHEMA, collections.abc.Callable)\n try:\n cls.SCHEMA(d) # pylint: disable=not-callable\n except MultipleInvalid as exc:\n raise StageFileFormatError(f\"'{fname}' format error: {exc}\")\n\n def remove(self, force=False): # pylint: disable=unused-argument\n with contextlib.suppress(FileNotFoundError):\n os.unlink(self.path)\n\n def dump(self, stage, **kwargs):\n raise NotImplementedError\n\n\nclass SingleStageFile(FileMixin):\n from dvc.schema import COMPILED_SINGLE_STAGE_SCHEMA as SCHEMA\n\n @property\n def stage(self):\n data, raw = self._load()\n return SingleStageLoader.load_stage(self, data, raw)\n\n @property\n def stages(self):\n data, raw = self._load()\n return SingleStageLoader(self, data, raw)\n\n def dump(self, stage, **kwargs):\n \"\"\"Dumps given stage appropriately in the dvcfile.\"\"\"\n from dvc.stage import PipelineStage\n\n assert not isinstance(stage, PipelineStage)\n check_dvc_filename(self.path)\n logger.debug(\n \"Saving information to '{file}'.\".format(file=relpath(self.path))\n )\n dump_yaml(self.path, serialize.to_single_stage_file(stage))\n self.repo.scm.track_file(self.relpath)\n\n def remove_stage(self, stage): # pylint: disable=unused-argument\n self.remove()\n\n\nclass PipelineFile(FileMixin):\n \"\"\"Abstraction for pipelines file, .yaml + .lock combined.\"\"\"\n\n from dvc.schema import COMPILED_MULTI_STAGE_SCHEMA as SCHEMA\n\n @property\n def _lockfile(self):\n return Lockfile(self.repo, os.path.splitext(self.path)[0] + \".lock\")\n\n def dump(\n self, stage, update_pipeline=False, no_lock=False, **kwargs\n ): # pylint: disable=arguments-differ\n \"\"\"Dumps given stage appropriately in the dvcfile.\"\"\"\n from dvc.stage import PipelineStage\n\n assert isinstance(stage, PipelineStage)\n check_dvc_filename(self.path)\n\n if update_pipeline and not stage.is_data_source:\n self._dump_pipeline_file(stage)\n\n if not no_lock:\n self._dump_lockfile(stage)\n\n def _dump_lockfile(self, stage):\n self._lockfile.dump(stage)\n\n def _dump_pipeline_file(self, stage):\n data = {}\n if self.exists():\n with open(self.path) as fd:\n data = parse_yaml_for_update(fd.read(), self.path)\n else:\n logger.info(\"Creating '%s'\", self.relpath)\n open(self.path, \"w+\").close()\n\n data[\"stages\"] = data.get(\"stages\", {})\n stage_data = serialize.to_pipeline_file(stage)\n existing_entry = stage.name in data[\"stages\"]\n\n action = \"Modifying\" if existing_entry else \"Adding\"\n logger.info(\"%s stage '%s' in '%s'\", action, stage.name, self.relpath)\n\n if existing_entry:\n orig_stage_data = data[\"stages\"][stage.name]\n if \"meta\" in orig_stage_data:\n stage_data[stage.name][\"meta\"] = orig_stage_data[\"meta\"]\n apply_diff(stage_data[stage.name], orig_stage_data)\n else:\n data[\"stages\"].update(stage_data)\n\n dump_yaml(self.path, data)\n self.repo.scm.track_file(self.relpath)\n\n @property\n def stage(self):\n raise DvcException(\n \"PipelineFile has multiple stages. 
Please specify it's name.\"\n )\n\n @property\n def stages(self):\n data, _ = self._load()\n lockfile_data = self._lockfile.load()\n return StageLoader(self, data.get(\"stages\", {}), lockfile_data)\n\n def remove(self, force=False):\n if not force:\n logger.warning(\"Cannot remove pipeline file.\")\n return\n\n super().remove()\n self._lockfile.remove()\n\n def remove_stage(self, stage):\n self._lockfile.remove_stage(stage)\n if not self.exists():\n return\n\n with open(self.path, \"r\") as f:\n d = parse_yaml_for_update(f.read(), self.path)\n\n self.validate(d, self.path)\n if stage.name not in d.get(\"stages\", {}):\n return\n\n logger.debug(\"Removing '%s' from '%s'\", stage.name, self.path)\n del d[\"stages\"][stage.name]\n dump_yaml(self.path, d)\n\n\nclass Lockfile(FileMixin):\n from dvc.schema import COMPILED_LOCKFILE_SCHEMA as SCHEMA\n\n def load(self):\n if not self.exists():\n return {}\n with self.repo.tree.open(self.path) as fd:\n data = parse_yaml(fd.read(), self.path)\n try:\n self.validate(data, fname=self.relpath)\n except StageFileFormatError:\n raise LockfileCorruptedError(\n f\"Lockfile '{self.relpath}' is corrupted.\"\n )\n return data\n\n def dump(self, stage, **kwargs):\n stage_data = serialize.to_lockfile(stage)\n if not self.exists():\n modified = True\n logger.info(\"Generating lock file '%s'\", self.relpath)\n data = stage_data\n open(self.path, \"w+\").close()\n else:\n with self.repo.tree.open(self.path, \"r\") as fd:\n data = parse_yaml_for_update(fd.read(), self.path)\n modified = data.get(stage.name, {}) != stage_data.get(\n stage.name, {}\n )\n if modified:\n logger.info(\"Updating lock file '%s'\", self.relpath)\n data.update(stage_data)\n dump_yaml(self.path, data)\n if modified:\n self.repo.scm.track_file(self.relpath)\n\n def remove_stage(self, stage):\n if not self.exists():\n return\n\n with open(self.path) as f:\n d = parse_yaml_for_update(f.read(), self.path)\n self.validate(d, self.path)\n\n if stage.name not in d:\n return\n\n logger.debug(\"Removing '%s' from '%s'\", stage.name, self.path)\n del d[stage.name]\n\n dump_yaml(self.path, d)\n\n\nclass Dvcfile:\n def __new__(cls, repo, path, **kwargs):\n assert path\n assert repo\n\n _, ext = os.path.splitext(path)\n if ext in [\".yaml\", \".yml\"]:\n return PipelineFile(repo, path, **kwargs)\n # fallback to single stage file for better error messages\n return SingleStageFile(repo, path, **kwargs)\n", "path": "dvc/dvcfile.py"}], "after_files": [{"content": "import collections\nimport contextlib\nimport logging\nimport os\n\nfrom voluptuous import MultipleInvalid\n\nfrom dvc.exceptions import DvcException\nfrom dvc.stage import serialize\nfrom dvc.stage.exceptions import (\n StageFileBadNameError,\n StageFileDoesNotExistError,\n StageFileFormatError,\n StageFileIsNotDvcFileError,\n)\nfrom dvc.stage.loader import SingleStageLoader, StageLoader\nfrom dvc.utils import relpath\nfrom dvc.utils.collections import apply_diff\nfrom dvc.utils.yaml import dump_yaml, parse_yaml, parse_yaml_for_update\n\nlogger = logging.getLogger(__name__)\n\nDVC_FILE = \"Dvcfile\"\nDVC_FILE_SUFFIX = \".dvc\"\nPIPELINE_FILE = \"dvc.yaml\"\nPIPELINE_LOCK = \"dvc.lock\"\n\n\nclass LockfileCorruptedError(DvcException):\n pass\n\n\ndef is_valid_filename(path):\n return path.endswith(DVC_FILE_SUFFIX) or os.path.basename(path) in [\n DVC_FILE,\n PIPELINE_FILE,\n ]\n\n\ndef is_dvc_file(path):\n return os.path.isfile(path) and (\n is_valid_filename(path) or os.path.basename(path) == PIPELINE_LOCK\n )\n\n\ndef check_dvc_filename(path):\n 
if not is_valid_filename(path):\n raise StageFileBadNameError(\n \"bad DVC-file name '{}'. DVC-files should be named \"\n \"'Dvcfile' or have a '.dvc' suffix (e.g. '{}.dvc').\".format(\n relpath(path), os.path.basename(path)\n )\n )\n\n\nclass FileMixin:\n SCHEMA = None\n\n def __init__(self, repo, path, **kwargs):\n self.repo = repo\n self.path = path\n\n def __repr__(self):\n return \"{}: {}\".format(\n self.__class__.__name__, relpath(self.path, self.repo.root_dir)\n )\n\n def __hash__(self):\n return hash(self.path)\n\n def __eq__(self, other):\n return self.repo == other.repo and os.path.abspath(\n self.path\n ) == os.path.abspath(other.path)\n\n def __str__(self):\n return f\"{self.__class__.__name__}: {self.relpath}\"\n\n @property\n def relpath(self):\n return relpath(self.path)\n\n def exists(self):\n return self.repo.tree.exists(self.path)\n\n def _load(self):\n # it raises the proper exceptions by priority:\n # 1. when the file doesn't exists\n # 2. filename is not a DVC-file\n # 3. path doesn't represent a regular file\n if not self.exists():\n raise StageFileDoesNotExistError(self.path)\n check_dvc_filename(self.path)\n if not self.repo.tree.isfile(self.path):\n raise StageFileIsNotDvcFileError(self.path)\n\n with self.repo.tree.open(self.path) as fd:\n stage_text = fd.read()\n d = parse_yaml(stage_text, self.path)\n self.validate(d, self.relpath)\n return d, stage_text\n\n @classmethod\n def validate(cls, d, fname=None):\n assert isinstance(cls.SCHEMA, collections.abc.Callable)\n try:\n cls.SCHEMA(d) # pylint: disable=not-callable\n except MultipleInvalid as exc:\n raise StageFileFormatError(f\"'{fname}' format error: {exc}\")\n\n def remove(self, force=False): # pylint: disable=unused-argument\n with contextlib.suppress(FileNotFoundError):\n os.unlink(self.path)\n\n def dump(self, stage, **kwargs):\n raise NotImplementedError\n\n\nclass SingleStageFile(FileMixin):\n from dvc.schema import COMPILED_SINGLE_STAGE_SCHEMA as SCHEMA\n\n @property\n def stage(self):\n data, raw = self._load()\n return SingleStageLoader.load_stage(self, data, raw)\n\n @property\n def stages(self):\n data, raw = self._load()\n return SingleStageLoader(self, data, raw)\n\n def dump(self, stage, **kwargs):\n \"\"\"Dumps given stage appropriately in the dvcfile.\"\"\"\n from dvc.stage import PipelineStage\n\n assert not isinstance(stage, PipelineStage)\n check_dvc_filename(self.path)\n logger.debug(\n \"Saving information to '{file}'.\".format(file=relpath(self.path))\n )\n dump_yaml(self.path, serialize.to_single_stage_file(stage))\n self.repo.scm.track_file(self.relpath)\n\n def remove_stage(self, stage): # pylint: disable=unused-argument\n self.remove()\n\n\nclass PipelineFile(FileMixin):\n \"\"\"Abstraction for pipelines file, .yaml + .lock combined.\"\"\"\n\n from dvc.schema import COMPILED_MULTI_STAGE_SCHEMA as SCHEMA\n\n @property\n def _lockfile(self):\n return Lockfile(self.repo, os.path.splitext(self.path)[0] + \".lock\")\n\n def dump(\n self, stage, update_pipeline=False, no_lock=False, **kwargs\n ): # pylint: disable=arguments-differ\n \"\"\"Dumps given stage appropriately in the dvcfile.\"\"\"\n from dvc.stage import PipelineStage\n\n assert isinstance(stage, PipelineStage)\n check_dvc_filename(self.path)\n\n if update_pipeline and not stage.is_data_source:\n self._dump_pipeline_file(stage)\n\n if not no_lock:\n self._dump_lockfile(stage)\n\n def _dump_lockfile(self, stage):\n self._lockfile.dump(stage)\n\n def _dump_pipeline_file(self, stage):\n data = {}\n if self.exists():\n with 
open(self.path) as fd:\n data = parse_yaml_for_update(fd.read(), self.path)\n else:\n logger.info(\"Creating '%s'\", self.relpath)\n open(self.path, \"w+\").close()\n\n data[\"stages\"] = data.get(\"stages\", {})\n stage_data = serialize.to_pipeline_file(stage)\n existing_entry = stage.name in data[\"stages\"]\n\n action = \"Modifying\" if existing_entry else \"Adding\"\n logger.info(\"%s stage '%s' in '%s'\", action, stage.name, self.relpath)\n\n if existing_entry:\n orig_stage_data = data[\"stages\"][stage.name]\n if \"meta\" in orig_stage_data:\n stage_data[stage.name][\"meta\"] = orig_stage_data[\"meta\"]\n apply_diff(stage_data[stage.name], orig_stage_data)\n else:\n data[\"stages\"].update(stage_data)\n\n dump_yaml(self.path, data)\n self.repo.scm.track_file(self.relpath)\n\n @property\n def stage(self):\n raise DvcException(\n \"PipelineFile has multiple stages. Please specify it's name.\"\n )\n\n @property\n def stages(self):\n data, _ = self._load()\n lockfile_data = self._lockfile.load()\n return StageLoader(self, data.get(\"stages\", {}), lockfile_data)\n\n def remove(self, force=False):\n if not force:\n logger.warning(\"Cannot remove pipeline file.\")\n return\n\n super().remove()\n self._lockfile.remove()\n\n def remove_stage(self, stage):\n self._lockfile.remove_stage(stage)\n if not self.exists():\n return\n\n with open(self.path, \"r\") as f:\n d = parse_yaml_for_update(f.read(), self.path)\n\n self.validate(d, self.path)\n if stage.name not in d.get(\"stages\", {}):\n return\n\n logger.debug(\"Removing '%s' from '%s'\", stage.name, self.path)\n del d[\"stages\"][stage.name]\n\n if d[\"stages\"]:\n dump_yaml(self.path, d)\n else:\n super().remove()\n\n\nclass Lockfile(FileMixin):\n from dvc.schema import COMPILED_LOCKFILE_SCHEMA as SCHEMA\n\n def load(self):\n if not self.exists():\n return {}\n with self.repo.tree.open(self.path) as fd:\n data = parse_yaml(fd.read(), self.path)\n try:\n self.validate(data, fname=self.relpath)\n except StageFileFormatError:\n raise LockfileCorruptedError(\n f\"Lockfile '{self.relpath}' is corrupted.\"\n )\n return data\n\n def dump(self, stage, **kwargs):\n stage_data = serialize.to_lockfile(stage)\n if not self.exists():\n modified = True\n logger.info(\"Generating lock file '%s'\", self.relpath)\n data = stage_data\n open(self.path, \"w+\").close()\n else:\n with self.repo.tree.open(self.path, \"r\") as fd:\n data = parse_yaml_for_update(fd.read(), self.path)\n modified = data.get(stage.name, {}) != stage_data.get(\n stage.name, {}\n )\n if modified:\n logger.info(\"Updating lock file '%s'\", self.relpath)\n data.update(stage_data)\n dump_yaml(self.path, data)\n if modified:\n self.repo.scm.track_file(self.relpath)\n\n def remove_stage(self, stage):\n if not self.exists():\n return\n\n with open(self.path) as f:\n d = parse_yaml_for_update(f.read(), self.path)\n self.validate(d, self.path)\n\n if stage.name not in d:\n return\n\n logger.debug(\"Removing '%s' from '%s'\", stage.name, self.path)\n del d[stage.name]\n\n if d:\n dump_yaml(self.path, d)\n else:\n self.remove()\n\n\nclass Dvcfile:\n def __new__(cls, repo, path, **kwargs):\n assert path\n assert repo\n\n _, ext = os.path.splitext(path)\n if ext in [\".yaml\", \".yml\"]:\n return PipelineFile(repo, path, **kwargs)\n # fallback to single stage file for better error messages\n return SingleStageFile(repo, path, **kwargs)\n", "path": "dvc/dvcfile.py"}]}
| 3,240 | 203 |
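The patched `dvc/dvcfile.py` shown above changes `remove_stage` so that, once the last stage entry is deleted, the `dvc.yaml`/`dvc.lock` file itself is removed instead of an empty mapping being written back. A minimal sketch of that pattern, assuming PyYAML and a generic single-file layout rather than DVC's own helpers:

```python
import os
from typing import Any, Dict

import yaml  # assumption: PyYAML is available


def dump_yaml(path: str, data: Dict[str, Any]) -> None:
    # Rewrite the mapping on disk (stand-in for DVC's dump_yaml helper).
    with open(path, "w") as fd:
        yaml.safe_dump(data, fd)


def remove_entry(path: str, name: str) -> None:
    """Drop one top-level entry; delete the file when nothing is left."""
    if not os.path.exists(path):
        return
    with open(path) as fd:
        data = yaml.safe_load(fd) or {}
    if name not in data:
        return
    del data[name]
    if data:
        dump_yaml(path, data)  # other entries remain: rewrite the file
    else:
        os.unlink(path)        # last entry removed: drop the file entirely
```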
gh_patches_debug_20002
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-2510
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Disk queues don't preserve Request class
When a Request subclass (e.g. FormRequest) is sent to a disk queue, a bare Request is what you get back. 
This is inconvenient for scrapy-splash: Splash requests all have the Splash URL as request.url, but for logging it is nice to display the requested URL, not only the Splash URL. In scrapy-splash this is implemented by changing `__repr__` in a Request subclass, but it works only while the request is kept in memory.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/utils/reqser.py`
Content:
```
1 """
2 Helper functions for serializing (and deserializing) requests.
3 """
4 import six
5
6 from scrapy.http import Request
7 from scrapy.utils.python import to_unicode, to_native_str
8
9
10 def request_to_dict(request, spider=None):
11 """Convert Request object to a dict.
12
13 If a spider is given, it will try to find out the name of the spider method
14 used in the callback and store that as the callback.
15 """
16 cb = request.callback
17 if callable(cb):
18 cb = _find_method(spider, cb)
19 eb = request.errback
20 if callable(eb):
21 eb = _find_method(spider, eb)
22 d = {
23 'url': to_unicode(request.url), # urls should be safe (safe_string_url)
24 'callback': cb,
25 'errback': eb,
26 'method': request.method,
27 'headers': dict(request.headers),
28 'body': request.body,
29 'cookies': request.cookies,
30 'meta': request.meta,
31 '_encoding': request._encoding,
32 'priority': request.priority,
33 'dont_filter': request.dont_filter,
34 }
35 return d
36
37
38 def request_from_dict(d, spider=None):
39 """Create Request object from a dict.
40
41 If a spider is given, it will try to resolve the callbacks looking at the
42 spider for methods with the same name.
43 """
44 cb = d['callback']
45 if cb and spider:
46 cb = _get_method(spider, cb)
47 eb = d['errback']
48 if eb and spider:
49 eb = _get_method(spider, eb)
50 return Request(
51 url=to_native_str(d['url']),
52 callback=cb,
53 errback=eb,
54 method=d['method'],
55 headers=d['headers'],
56 body=d['body'],
57 cookies=d['cookies'],
58 meta=d['meta'],
59 encoding=d['_encoding'],
60 priority=d['priority'],
61 dont_filter=d['dont_filter'])
62
63
64 def _find_method(obj, func):
65 if obj:
66 try:
67 func_self = six.get_method_self(func)
68 except AttributeError: # func has no __self__
69 pass
70 else:
71 if func_self is obj:
72 return six.get_method_function(func).__name__
73 raise ValueError("Function %s is not a method of: %s" % (func, obj))
74
75
76 def _get_method(obj, name):
77 name = str(name)
78 try:
79 return getattr(obj, name)
80 except AttributeError:
81 raise ValueError("Method %r not found in: %s" % (name, obj))
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/utils/reqser.py b/scrapy/utils/reqser.py
--- a/scrapy/utils/reqser.py
+++ b/scrapy/utils/reqser.py
@@ -5,6 +5,7 @@
from scrapy.http import Request
from scrapy.utils.python import to_unicode, to_native_str
+from scrapy.utils.misc import load_object
def request_to_dict(request, spider=None):
@@ -32,6 +33,8 @@
'priority': request.priority,
'dont_filter': request.dont_filter,
}
+ if type(request) is not Request:
+ d['_class'] = request.__module__ + '.' + request.__class__.__name__
return d
@@ -47,7 +50,8 @@
eb = d['errback']
if eb and spider:
eb = _get_method(spider, eb)
- return Request(
+ request_cls = load_object(d['_class']) if '_class' in d else Request
+ return request_cls(
url=to_native_str(d['url']),
callback=cb,
errback=eb,
|
{"golden_diff": "diff --git a/scrapy/utils/reqser.py b/scrapy/utils/reqser.py\n--- a/scrapy/utils/reqser.py\n+++ b/scrapy/utils/reqser.py\n@@ -5,6 +5,7 @@\n \n from scrapy.http import Request\n from scrapy.utils.python import to_unicode, to_native_str\n+from scrapy.utils.misc import load_object\n \n \n def request_to_dict(request, spider=None):\n@@ -32,6 +33,8 @@\n 'priority': request.priority,\n 'dont_filter': request.dont_filter,\n }\n+ if type(request) is not Request:\n+ d['_class'] = request.__module__ + '.' + request.__class__.__name__\n return d\n \n \n@@ -47,7 +50,8 @@\n eb = d['errback']\n if eb and spider:\n eb = _get_method(spider, eb)\n- return Request(\n+ request_cls = load_object(d['_class']) if '_class' in d else Request\n+ return request_cls(\n url=to_native_str(d['url']),\n callback=cb,\n errback=eb,\n", "issue": "Disk queues don't preserve Request class\nWhen a Request subclass (e.g. FormRequest) is sent to a disk queue a bare Request is what you get back. \n\nThis is inconvenient for scrapy-splash: Splash requests all have Splash URL as request.url, but for logging it is nice to display the requested URL, not only Splash URL. In scrapy-splash this is implemented by changing `__repr__` in a Request subclass, but it works only when request is kept in memory.\n\n", "before_files": [{"content": "\"\"\"\nHelper functions for serializing (and deserializing) requests.\n\"\"\"\nimport six\n\nfrom scrapy.http import Request\nfrom scrapy.utils.python import to_unicode, to_native_str\n\n\ndef request_to_dict(request, spider=None):\n \"\"\"Convert Request object to a dict.\n\n If a spider is given, it will try to find out the name of the spider method\n used in the callback and store that as the callback.\n \"\"\"\n cb = request.callback\n if callable(cb):\n cb = _find_method(spider, cb)\n eb = request.errback\n if callable(eb):\n eb = _find_method(spider, eb)\n d = {\n 'url': to_unicode(request.url), # urls should be safe (safe_string_url)\n 'callback': cb,\n 'errback': eb,\n 'method': request.method,\n 'headers': dict(request.headers),\n 'body': request.body,\n 'cookies': request.cookies,\n 'meta': request.meta,\n '_encoding': request._encoding,\n 'priority': request.priority,\n 'dont_filter': request.dont_filter,\n }\n return d\n\n\ndef request_from_dict(d, spider=None):\n \"\"\"Create Request object from a dict.\n\n If a spider is given, it will try to resolve the callbacks looking at the\n spider for methods with the same name.\n \"\"\"\n cb = d['callback']\n if cb and spider:\n cb = _get_method(spider, cb)\n eb = d['errback']\n if eb and spider:\n eb = _get_method(spider, eb)\n return Request(\n url=to_native_str(d['url']),\n callback=cb,\n errback=eb,\n method=d['method'],\n headers=d['headers'],\n body=d['body'],\n cookies=d['cookies'],\n meta=d['meta'],\n encoding=d['_encoding'],\n priority=d['priority'],\n dont_filter=d['dont_filter'])\n\n\ndef _find_method(obj, func):\n if obj:\n try:\n func_self = six.get_method_self(func)\n except AttributeError: # func has no __self__\n pass\n else:\n if func_self is obj:\n return six.get_method_function(func).__name__\n raise ValueError(\"Function %s is not a method of: %s\" % (func, obj))\n\n\ndef _get_method(obj, name):\n name = str(name)\n try:\n return getattr(obj, name)\n except AttributeError:\n raise ValueError(\"Method %r not found in: %s\" % (name, obj))\n", "path": "scrapy/utils/reqser.py"}], "after_files": [{"content": "\"\"\"\nHelper functions for serializing (and deserializing) requests.\n\"\"\"\nimport six\n\nfrom 
scrapy.http import Request\nfrom scrapy.utils.python import to_unicode, to_native_str\nfrom scrapy.utils.misc import load_object\n\n\ndef request_to_dict(request, spider=None):\n \"\"\"Convert Request object to a dict.\n\n If a spider is given, it will try to find out the name of the spider method\n used in the callback and store that as the callback.\n \"\"\"\n cb = request.callback\n if callable(cb):\n cb = _find_method(spider, cb)\n eb = request.errback\n if callable(eb):\n eb = _find_method(spider, eb)\n d = {\n 'url': to_unicode(request.url), # urls should be safe (safe_string_url)\n 'callback': cb,\n 'errback': eb,\n 'method': request.method,\n 'headers': dict(request.headers),\n 'body': request.body,\n 'cookies': request.cookies,\n 'meta': request.meta,\n '_encoding': request._encoding,\n 'priority': request.priority,\n 'dont_filter': request.dont_filter,\n }\n if type(request) is not Request:\n d['_class'] = request.__module__ + '.' + request.__class__.__name__\n return d\n\n\ndef request_from_dict(d, spider=None):\n \"\"\"Create Request object from a dict.\n\n If a spider is given, it will try to resolve the callbacks looking at the\n spider for methods with the same name.\n \"\"\"\n cb = d['callback']\n if cb and spider:\n cb = _get_method(spider, cb)\n eb = d['errback']\n if eb and spider:\n eb = _get_method(spider, eb)\n request_cls = load_object(d['_class']) if '_class' in d else Request\n return request_cls(\n url=to_native_str(d['url']),\n callback=cb,\n errback=eb,\n method=d['method'],\n headers=d['headers'],\n body=d['body'],\n cookies=d['cookies'],\n meta=d['meta'],\n encoding=d['_encoding'],\n priority=d['priority'],\n dont_filter=d['dont_filter'])\n\n\ndef _find_method(obj, func):\n if obj:\n try:\n func_self = six.get_method_self(func)\n except AttributeError: # func has no __self__\n pass\n else:\n if func_self is obj:\n return six.get_method_function(func).__name__\n raise ValueError(\"Function %s is not a method of: %s\" % (func, obj))\n\n\ndef _get_method(obj, name):\n name = str(name)\n try:\n return getattr(obj, name)\n except AttributeError:\n raise ValueError(\"Method %r not found in: %s\" % (name, obj))\n", "path": "scrapy/utils/reqser.py"}]}
| 1,066 | 243 |
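The golden diff above records the request's dotted class path under `_class` and resolves it again with `load_object` when deserializing, so a `FormRequest` comes back as a `FormRequest`. A minimal, self-contained sketch of that round-trip, using `importlib` in place of Scrapy's `load_object` and toy request classes (both are assumptions, not Scrapy's API):

```python
from importlib import import_module


def load_object(path: str):
    # Resolve "package.module.ClassName" back to the class object.
    module_path, _, name = path.rpartition(".")
    return getattr(import_module(module_path), name)


class Request:
    def __init__(self, url):
        self.url = url


class FormRequest(Request):
    pass


def request_to_dict(request: Request) -> dict:
    d = {"url": request.url}
    if type(request) is not Request:
        # Same trick as the patch: remember the concrete subclass.
        d["_class"] = request.__module__ + "." + request.__class__.__name__
    return d


def request_from_dict(d: dict) -> Request:
    request_cls = load_object(d["_class"]) if "_class" in d else Request
    return request_cls(url=d["url"])


restored = request_from_dict(request_to_dict(FormRequest("http://example.com")))
print(type(restored).__name__)  # FormRequest
```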
gh_patches_debug_16259
|
rasdani/github-patches
|
git_diff
|
tensorflow__addons-270
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update sparsemax to use tf.where V2
As described in #250 and temporarily patched in #251, sparsemax has one instance of tf.where whose broadcasting dimensions need to be changed to match NumPy and TF2 style.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensorflow_addons/activations/sparsemax.py`
Content:
```
1 # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15
16 from __future__ import absolute_import
17 from __future__ import division
18 from __future__ import print_function
19
20 import tensorflow as tf
21
22 from tensorflow_addons.utils import keras_utils
23
24
25 @tf.function
26 @keras_utils.register_keras_custom_object
27 def sparsemax(logits, axis=-1, name=None):
28 """Sparsemax activation function [1].
29
30 For each batch `i` and class `j` we have
31 $$sparsemax[i, j] = max(logits[i, j] - tau(logits[i, :]), 0)$$
32
33 [1]: https://arxiv.org/abs/1602.02068
34
35 Args:
36 logits: Input tensor.
37 axis: Integer, axis along which the sparsemax operation is applied.
38 name: A name for the operation (optional).
39 Returns:
40 Tensor, output of sparsemax transformation. Has the same type and
41 shape as `logits`.
42 Raises:
43 ValueError: In case `dim(logits) == 1`.
44 """
45 logits = tf.convert_to_tensor(logits, name="logits")
46
47 # We need its original shape for shape inference.
48 shape = logits.get_shape()
49 rank = shape.rank
50 is_last_axis = (axis == -1) or (axis == rank - 1)
51
52 if is_last_axis:
53 output = _compute_2d_sparsemax(logits, name=name)
54 output.set_shape(shape)
55 return output
56
57 # If dim is not the last dimension, we have to do a transpose so that we can
58 # still perform softmax on its last dimension.
59
60 # Swap logits' dimension of dim and its last dimension.
61 rank_op = tf.rank(logits)
62 axis_norm = axis % rank
63 logits = _swap_axis(logits, axis_norm, tf.math.subtract(rank_op, 1))
64
65 # Do the actual softmax on its last dimension.
66 output = _compute_2d_sparsemax(logits)
67 output = _swap_axis(
68 output, axis_norm, tf.math.subtract(rank_op, 1), name=name)
69
70 # Make shape inference work since transpose may erase its static shape.
71 output.set_shape(shape)
72 return output
73
74
75 def _swap_axis(logits, dim_index, last_index, **kwargs):
76 return tf.transpose(
77 logits,
78 tf.concat([
79 tf.range(dim_index), [last_index],
80 tf.range(dim_index + 1, last_index), [dim_index]
81 ], 0), **kwargs)
82
83
84 @tf.function
85 def _compute_2d_sparsemax(logits, name=None):
86 """Performs the sparsemax operation when axis=-1."""
87 shape_op = tf.shape(logits)
88 obs = tf.math.reduce_prod(shape_op[:-1])
89 dims = shape_op[-1]
90
91 # In the paper, they call the logits z.
92 # The mean(logits) can be substracted from logits to make the algorithm
93 # more numerically stable. the instability in this algorithm comes mostly
94 # from the z_cumsum. Substacting the mean will cause z_cumsum to be close
95 # to zero. However, in practise the numerical instability issues are very
96 # minor and substacting the mean causes extra issues with inf and nan
97 # input.
98 # Reshape to [obs, dims] as it is almost free and means the remanining
99 # code doesn't need to worry about the rank.
100 z = tf.reshape(logits, [obs, dims])
101
102 # sort z
103 z_sorted, _ = tf.nn.top_k(z, k=dims)
104
105 # calculate k(z)
106 z_cumsum = tf.math.cumsum(z_sorted, axis=-1)
107 k = tf.range(1, tf.cast(dims, logits.dtype) + 1, dtype=logits.dtype)
108 z_check = 1 + k * z_sorted > z_cumsum
109 # because the z_check vector is always [1,1,...1,0,0,...0] finding the
110 # (index + 1) of the last `1` is the same as just summing the number of 1.
111 k_z = tf.math.reduce_sum(tf.cast(z_check, tf.int32), axis=-1)
112
113 # calculate tau(z)
114 # If there are inf values or all values are -inf, the k_z will be zero,
115 # this is mathematically invalid and will also cause the gather_nd to fail.
116 # Prevent this issue for now by setting k_z = 1 if k_z = 0, this is then
117 # fixed later (see p_safe) by returning p = nan. This results in the same
118 # behavior as softmax.
119 k_z_safe = tf.math.maximum(k_z, 1)
120 indices = tf.stack(
121 [tf.range(0, obs), tf.reshape(k_z_safe, [-1]) - 1], axis=1)
122 tau_sum = tf.gather_nd(z_cumsum, indices)
123 tau_z = (tau_sum - 1) / tf.cast(k_z, logits.dtype)
124
125 # calculate p
126 p = tf.math.maximum(
127 tf.cast(0, logits.dtype), z - tf.expand_dims(tau_z, -1))
128 # If k_z = 0 or if z = nan, then the input is invalid
129 # TODO: Adjust dimension order for TF2 broadcasting
130 p_safe = tf.compat.v1.where(
131 tf.math.logical_or(
132 tf.math.equal(k_z, 0), tf.math.is_nan(z_cumsum[:, -1])),
133 tf.fill([obs, dims], tf.cast(float("nan"), logits.dtype)), p)
134
135 # Reshape back to original size
136 p_safe = tf.reshape(p_safe, shape_op, name=name)
137 return p_safe
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tensorflow_addons/activations/sparsemax.py b/tensorflow_addons/activations/sparsemax.py
--- a/tensorflow_addons/activations/sparsemax.py
+++ b/tensorflow_addons/activations/sparsemax.py
@@ -126,11 +126,12 @@
p = tf.math.maximum(
tf.cast(0, logits.dtype), z - tf.expand_dims(tau_z, -1))
# If k_z = 0 or if z = nan, then the input is invalid
- # TODO: Adjust dimension order for TF2 broadcasting
- p_safe = tf.compat.v1.where(
- tf.math.logical_or(
- tf.math.equal(k_z, 0), tf.math.is_nan(z_cumsum[:, -1])),
- tf.fill([obs, dims], tf.cast(float("nan"), logits.dtype)), p)
+ p_safe = tf.where(
+ tf.expand_dims(
+ tf.math.logical_or(
+ tf.math.equal(k_z, 0), tf.math.is_nan(z_cumsum[:, -1])),
+ axis=-1), tf.fill([obs, dims], tf.cast(float("nan"),
+ logits.dtype)), p)
# Reshape back to original size
p_safe = tf.reshape(p_safe, shape_op, name=name)
|
{"golden_diff": "diff --git a/tensorflow_addons/activations/sparsemax.py b/tensorflow_addons/activations/sparsemax.py\n--- a/tensorflow_addons/activations/sparsemax.py\n+++ b/tensorflow_addons/activations/sparsemax.py\n@@ -126,11 +126,12 @@\n p = tf.math.maximum(\n tf.cast(0, logits.dtype), z - tf.expand_dims(tau_z, -1))\n # If k_z = 0 or if z = nan, then the input is invalid\n- # TODO: Adjust dimension order for TF2 broadcasting\n- p_safe = tf.compat.v1.where(\n- tf.math.logical_or(\n- tf.math.equal(k_z, 0), tf.math.is_nan(z_cumsum[:, -1])),\n- tf.fill([obs, dims], tf.cast(float(\"nan\"), logits.dtype)), p)\n+ p_safe = tf.where(\n+ tf.expand_dims(\n+ tf.math.logical_or(\n+ tf.math.equal(k_z, 0), tf.math.is_nan(z_cumsum[:, -1])),\n+ axis=-1), tf.fill([obs, dims], tf.cast(float(\"nan\"),\n+ logits.dtype)), p)\n \n # Reshape back to original size\n p_safe = tf.reshape(p_safe, shape_op, name=name)\n", "issue": "Update sparsemax to use tf.where V2\nAs described in #250 and temporarily patched in #251 sparsemax has one instance of tf.where that needs the broadcasting dimensions changed to match numpy and TF2 style.\n", "before_files": [{"content": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tensorflow_addons.utils import keras_utils\n\n\[email protected]\n@keras_utils.register_keras_custom_object\ndef sparsemax(logits, axis=-1, name=None):\n \"\"\"Sparsemax activation function [1].\n\n For each batch `i` and class `j` we have\n $$sparsemax[i, j] = max(logits[i, j] - tau(logits[i, :]), 0)$$\n\n [1]: https://arxiv.org/abs/1602.02068\n\n Args:\n logits: Input tensor.\n axis: Integer, axis along which the sparsemax operation is applied.\n name: A name for the operation (optional).\n Returns:\n Tensor, output of sparsemax transformation. 
Has the same type and\n shape as `logits`.\n Raises:\n ValueError: In case `dim(logits) == 1`.\n \"\"\"\n logits = tf.convert_to_tensor(logits, name=\"logits\")\n\n # We need its original shape for shape inference.\n shape = logits.get_shape()\n rank = shape.rank\n is_last_axis = (axis == -1) or (axis == rank - 1)\n\n if is_last_axis:\n output = _compute_2d_sparsemax(logits, name=name)\n output.set_shape(shape)\n return output\n\n # If dim is not the last dimension, we have to do a transpose so that we can\n # still perform softmax on its last dimension.\n\n # Swap logits' dimension of dim and its last dimension.\n rank_op = tf.rank(logits)\n axis_norm = axis % rank\n logits = _swap_axis(logits, axis_norm, tf.math.subtract(rank_op, 1))\n\n # Do the actual softmax on its last dimension.\n output = _compute_2d_sparsemax(logits)\n output = _swap_axis(\n output, axis_norm, tf.math.subtract(rank_op, 1), name=name)\n\n # Make shape inference work since transpose may erase its static shape.\n output.set_shape(shape)\n return output\n\n\ndef _swap_axis(logits, dim_index, last_index, **kwargs):\n return tf.transpose(\n logits,\n tf.concat([\n tf.range(dim_index), [last_index],\n tf.range(dim_index + 1, last_index), [dim_index]\n ], 0), **kwargs)\n\n\[email protected]\ndef _compute_2d_sparsemax(logits, name=None):\n \"\"\"Performs the sparsemax operation when axis=-1.\"\"\"\n shape_op = tf.shape(logits)\n obs = tf.math.reduce_prod(shape_op[:-1])\n dims = shape_op[-1]\n\n # In the paper, they call the logits z.\n # The mean(logits) can be substracted from logits to make the algorithm\n # more numerically stable. the instability in this algorithm comes mostly\n # from the z_cumsum. Substacting the mean will cause z_cumsum to be close\n # to zero. However, in practise the numerical instability issues are very\n # minor and substacting the mean causes extra issues with inf and nan\n # input.\n # Reshape to [obs, dims] as it is almost free and means the remanining\n # code doesn't need to worry about the rank.\n z = tf.reshape(logits, [obs, dims])\n\n # sort z\n z_sorted, _ = tf.nn.top_k(z, k=dims)\n\n # calculate k(z)\n z_cumsum = tf.math.cumsum(z_sorted, axis=-1)\n k = tf.range(1, tf.cast(dims, logits.dtype) + 1, dtype=logits.dtype)\n z_check = 1 + k * z_sorted > z_cumsum\n # because the z_check vector is always [1,1,...1,0,0,...0] finding the\n # (index + 1) of the last `1` is the same as just summing the number of 1.\n k_z = tf.math.reduce_sum(tf.cast(z_check, tf.int32), axis=-1)\n\n # calculate tau(z)\n # If there are inf values or all values are -inf, the k_z will be zero,\n # this is mathematically invalid and will also cause the gather_nd to fail.\n # Prevent this issue for now by setting k_z = 1 if k_z = 0, this is then\n # fixed later (see p_safe) by returning p = nan. 
This results in the same\n # behavior as softmax.\n k_z_safe = tf.math.maximum(k_z, 1)\n indices = tf.stack(\n [tf.range(0, obs), tf.reshape(k_z_safe, [-1]) - 1], axis=1)\n tau_sum = tf.gather_nd(z_cumsum, indices)\n tau_z = (tau_sum - 1) / tf.cast(k_z, logits.dtype)\n\n # calculate p\n p = tf.math.maximum(\n tf.cast(0, logits.dtype), z - tf.expand_dims(tau_z, -1))\n # If k_z = 0 or if z = nan, then the input is invalid\n # TODO: Adjust dimension order for TF2 broadcasting\n p_safe = tf.compat.v1.where(\n tf.math.logical_or(\n tf.math.equal(k_z, 0), tf.math.is_nan(z_cumsum[:, -1])),\n tf.fill([obs, dims], tf.cast(float(\"nan\"), logits.dtype)), p)\n\n # Reshape back to original size\n p_safe = tf.reshape(p_safe, shape_op, name=name)\n return p_safe\n", "path": "tensorflow_addons/activations/sparsemax.py"}], "after_files": [{"content": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tensorflow_addons.utils import keras_utils\n\n\[email protected]\n@keras_utils.register_keras_custom_object\ndef sparsemax(logits, axis=-1, name=None):\n \"\"\"Sparsemax activation function [1].\n\n For each batch `i` and class `j` we have\n $$sparsemax[i, j] = max(logits[i, j] - tau(logits[i, :]), 0)$$\n\n [1]: https://arxiv.org/abs/1602.02068\n\n Args:\n logits: Input tensor.\n axis: Integer, axis along which the sparsemax operation is applied.\n name: A name for the operation (optional).\n Returns:\n Tensor, output of sparsemax transformation. 
Has the same type and\n shape as `logits`.\n Raises:\n ValueError: In case `dim(logits) == 1`.\n \"\"\"\n logits = tf.convert_to_tensor(logits, name=\"logits\")\n\n # We need its original shape for shape inference.\n shape = logits.get_shape()\n rank = shape.rank\n is_last_axis = (axis == -1) or (axis == rank - 1)\n\n if is_last_axis:\n output = _compute_2d_sparsemax(logits, name=name)\n output.set_shape(shape)\n return output\n\n # If dim is not the last dimension, we have to do a transpose so that we can\n # still perform softmax on its last dimension.\n\n # Swap logits' dimension of dim and its last dimension.\n rank_op = tf.rank(logits)\n axis_norm = axis % rank\n logits = _swap_axis(logits, axis_norm, tf.math.subtract(rank_op, 1))\n\n # Do the actual softmax on its last dimension.\n output = _compute_2d_sparsemax(logits)\n output = _swap_axis(\n output, axis_norm, tf.math.subtract(rank_op, 1), name=name)\n\n # Make shape inference work since transpose may erase its static shape.\n output.set_shape(shape)\n return output\n\n\ndef _swap_axis(logits, dim_index, last_index, **kwargs):\n return tf.transpose(\n logits,\n tf.concat([\n tf.range(dim_index), [last_index],\n tf.range(dim_index + 1, last_index), [dim_index]\n ], 0), **kwargs)\n\n\[email protected]\ndef _compute_2d_sparsemax(logits, name=None):\n \"\"\"Performs the sparsemax operation when axis=-1.\"\"\"\n shape_op = tf.shape(logits)\n obs = tf.math.reduce_prod(shape_op[:-1])\n dims = shape_op[-1]\n\n # In the paper, they call the logits z.\n # The mean(logits) can be substracted from logits to make the algorithm\n # more numerically stable. the instability in this algorithm comes mostly\n # from the z_cumsum. Substacting the mean will cause z_cumsum to be close\n # to zero. However, in practise the numerical instability issues are very\n # minor and substacting the mean causes extra issues with inf and nan\n # input.\n # Reshape to [obs, dims] as it is almost free and means the remanining\n # code doesn't need to worry about the rank.\n z = tf.reshape(logits, [obs, dims])\n\n # sort z\n z_sorted, _ = tf.nn.top_k(z, k=dims)\n\n # calculate k(z)\n z_cumsum = tf.math.cumsum(z_sorted, axis=-1)\n k = tf.range(1, tf.cast(dims, logits.dtype) + 1, dtype=logits.dtype)\n z_check = 1 + k * z_sorted > z_cumsum\n # because the z_check vector is always [1,1,...1,0,0,...0] finding the\n # (index + 1) of the last `1` is the same as just summing the number of 1.\n k_z = tf.math.reduce_sum(tf.cast(z_check, tf.int32), axis=-1)\n\n # calculate tau(z)\n # If there are inf values or all values are -inf, the k_z will be zero,\n # this is mathematically invalid and will also cause the gather_nd to fail.\n # Prevent this issue for now by setting k_z = 1 if k_z = 0, this is then\n # fixed later (see p_safe) by returning p = nan. 
This results in the same\n # behavior as softmax.\n k_z_safe = tf.math.maximum(k_z, 1)\n indices = tf.stack(\n [tf.range(0, obs), tf.reshape(k_z_safe, [-1]) - 1], axis=1)\n tau_sum = tf.gather_nd(z_cumsum, indices)\n tau_z = (tau_sum - 1) / tf.cast(k_z, logits.dtype)\n\n # calculate p\n p = tf.math.maximum(\n tf.cast(0, logits.dtype), z - tf.expand_dims(tau_z, -1))\n # If k_z = 0 or if z = nan, then the input is invalid\n p_safe = tf.where(\n tf.expand_dims(\n tf.math.logical_or(\n tf.math.equal(k_z, 0), tf.math.is_nan(z_cumsum[:, -1])),\n axis=-1), tf.fill([obs, dims], tf.cast(float(\"nan\"),\n logits.dtype)), p)\n\n # Reshape back to original size\n p_safe = tf.reshape(p_safe, shape_op, name=name)\n return p_safe\n", "path": "tensorflow_addons/activations/sparsemax.py"}]}
| 1,988 | 291 |
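The fix above wraps the invalid-row mask in `tf.expand_dims(..., axis=-1)` so that TF2-style `tf.where` can broadcast a `[obs, 1]` condition against the `[obs, dims]` operands. NumPy's `np.where` follows the same broadcasting rules, so a short sketch (with made-up shapes) shows why the extra axis is needed:

```python
import numpy as np

obs, dims = 3, 4
p = np.arange(obs * dims, dtype=float).reshape(obs, dims)  # shape [obs, dims]
invalid = np.array([False, True, False])                   # shape [obs]

# A rank-1 condition of shape [obs] does not broadcast against [obs, dims]
# (shapes align from the right), so np.where(invalid, np.nan, p) would fail.
# Adding a trailing axis gives shape [obs, 1], which broadcasts row-wise.
p_safe = np.where(invalid[:, np.newaxis], np.nan, p)
print(p_safe)  # the second row is all nan, the others are untouched
```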
gh_patches_debug_5264
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-308
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Asgi request headers are not retrieved properly
Environment:
Python: python3.8
fastapi==0.63.0
opentelemetry-api==0.16b1
opentelemetry-sdk==0.16b1
opentelemetry-instrumentation-fastapi==0.16b1
opentelemetry-exporter-google-cloud==0.16b1
opentelemetry-tools-google-cloud==0.16b1
When using `CloudTraceFormatPropagator` for [GCP](https://github.com/GoogleCloudPlatform/opentelemetry-operations-python), the `X-Cloud-Trace-Context` header cannot be retrieved.
**Steps to reproduce**
Describe exactly how to reproduce the error. Include a code sample if applicable.
```
# server.py
import uvicorn
from fastapi import FastAPI, Request
from opentelemetry import trace
from opentelemetry.propagators import set_global_textmap
from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.tools import cloud_trace_propagator
set_global_textmap(cloud_trace_propagator.CloudTraceFormatPropagator())
app = FastAPI()
tracer = trace.get_tracer("test")
FastAPIInstrumentor.instrument_app(app)
@app.get("/trace")
async def test(r: Request):
with tracer.start_as_current_span("test") as span:
trace_id = span.get_span_context().trace_id
print(f"{trace_id:32x}") # should print trace ID from `X-Cloud-Trace-Context` header value
uvicorn.run(app)
```
```
# client.py
import requests
r = requests.Session()
r.headers.setdefault("X-Cloud-Trace-Context",
"f3ef5c2ede256aa77491057e600eca11/15104302039794794507;o=1")
r.get("http://localhost:8000/trace")
```
**What is the expected behavior?**
Printed value should be `f3ef5c2ede256aa77491057e600eca11` based on the header sent
**What is the actual behavior?**
A newly generated value every time `/trace` is called
**Additional context**
`X-Cloud-Trace-Context` header value is not retrieved properly in `CloudTraceFormatPropagator`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 The opentelemetry-instrumentation-asgi package provides an ASGI middleware that can be used
17 on any ASGI framework (such as Django-channels / Quart) to track requests
18 timing through OpenTelemetry.
19 """
20
21 import typing
22 import urllib
23 from functools import wraps
24 from typing import Tuple
25
26 from asgiref.compatibility import guarantee_single_callable
27
28 from opentelemetry import context, propagators, trace
29 from opentelemetry.instrumentation.asgi.version import __version__ # noqa
30 from opentelemetry.instrumentation.utils import http_status_to_status_code
31 from opentelemetry.trace.propagation.textmap import DictGetter
32 from opentelemetry.trace.status import Status, StatusCode
33
34
35 class CarrierGetter(DictGetter):
36 def get(
37 self, carrier: dict, key: str
38 ) -> typing.Optional[typing.List[str]]:
39 """Getter implementation to retrieve a HTTP header value from the ASGI
40 scope.
41
42 Args:
43 carrier: ASGI scope object
44 key: header name in scope
45 Returns:
46 A list with a single string with the header value if it exists,
47 else None.
48 """
49 headers = carrier.get("headers")
50 decoded = [
51 _value.decode("utf8")
52 for (_key, _value) in headers
53 if _key.decode("utf8") == key
54 ]
55 if not decoded:
56 return None
57 return decoded
58
59
60 carrier_getter = CarrierGetter()
61
62
63 def collect_request_attributes(scope):
64 """Collects HTTP request attributes from the ASGI scope and returns a
65 dictionary to be used as span creation attributes."""
66 server_host, port, http_url = get_host_port_url_tuple(scope)
67 query_string = scope.get("query_string")
68 if query_string and http_url:
69 if isinstance(query_string, bytes):
70 query_string = query_string.decode("utf8")
71 http_url = http_url + ("?" + urllib.parse.unquote(query_string))
72
73 result = {
74 "http.scheme": scope.get("scheme"),
75 "http.host": server_host,
76 "net.host.port": port,
77 "http.flavor": scope.get("http_version"),
78 "http.target": scope.get("path"),
79 "http.url": http_url,
80 }
81 http_method = scope.get("method")
82 if http_method:
83 result["http.method"] = http_method
84
85 http_host_value_list = carrier_getter.get(scope, "host")
86 if http_host_value_list:
87 result["http.server_name"] = ",".join(http_host_value_list)
88 http_user_agent = carrier_getter.get(scope, "user-agent")
89 if http_user_agent:
90 result["http.user_agent"] = http_user_agent[0]
91
92 if "client" in scope and scope["client"] is not None:
93 result["net.peer.ip"] = scope.get("client")[0]
94 result["net.peer.port"] = scope.get("client")[1]
95
96 # remove None values
97 result = {k: v for k, v in result.items() if v is not None}
98
99 return result
100
101
102 def get_host_port_url_tuple(scope):
103 """Returns (host, port, full_url) tuple.
104 """
105 server = scope.get("server") or ["0.0.0.0", 80]
106 port = server[1]
107 server_host = server[0] + (":" + str(port) if port != 80 else "")
108 full_path = scope.get("root_path", "") + scope.get("path", "")
109 http_url = scope.get("scheme", "http") + "://" + server_host + full_path
110 return server_host, port, http_url
111
112
113 def set_status_code(span, status_code):
114 """Adds HTTP response attributes to span using the status_code argument."""
115 if not span.is_recording():
116 return
117 try:
118 status_code = int(status_code)
119 except ValueError:
120 span.set_status(
121 Status(
122 StatusCode.ERROR,
123 "Non-integer HTTP status: " + repr(status_code),
124 )
125 )
126 else:
127 span.set_attribute("http.status_code", status_code)
128 span.set_status(Status(http_status_to_status_code(status_code)))
129
130
131 def get_default_span_details(scope: dict) -> Tuple[str, dict]:
132 """Default implementation for span_details_callback
133
134 Args:
135 scope: the asgi scope dictionary
136
137 Returns:
138 a tuple of the span, and any attributes to attach to the
139 span.
140 """
141 method_or_path = scope.get("method") or scope.get("path")
142
143 return method_or_path, {}
144
145
146 class OpenTelemetryMiddleware:
147 """The ASGI application middleware.
148
149 This class is an ASGI middleware that starts and annotates spans for any
150 requests it is invoked with.
151
152 Args:
153 app: The ASGI application callable to forward requests to.
154 span_details_callback: Callback which should return a string
155 and a tuple, representing the desired span name and a
156 dictionary with any additional span attributes to set.
157 Optional: Defaults to get_default_span_details.
158 """
159
160 def __init__(self, app, excluded_urls=None, span_details_callback=None):
161 self.app = guarantee_single_callable(app)
162 self.tracer = trace.get_tracer(__name__, __version__)
163 self.span_details_callback = (
164 span_details_callback or get_default_span_details
165 )
166 self.excluded_urls = excluded_urls
167
168 async def __call__(self, scope, receive, send):
169 """The ASGI application
170
171 Args:
172 scope: A ASGI environment.
173 receive: An awaitable callable yielding dictionaries
174 send: An awaitable callable taking a single dictionary as argument.
175 """
176 if scope["type"] not in ("http", "websocket"):
177 return await self.app(scope, receive, send)
178
179 _, _, url = get_host_port_url_tuple(scope)
180 if self.excluded_urls and self.excluded_urls.url_disabled(url):
181 return await self.app(scope, receive, send)
182
183 token = context.attach(propagators.extract(carrier_getter, scope))
184 span_name, additional_attributes = self.span_details_callback(scope)
185
186 try:
187 with self.tracer.start_as_current_span(
188 span_name + " asgi", kind=trace.SpanKind.SERVER,
189 ) as span:
190 if span.is_recording():
191 attributes = collect_request_attributes(scope)
192 attributes.update(additional_attributes)
193 for key, value in attributes.items():
194 span.set_attribute(key, value)
195
196 @wraps(receive)
197 async def wrapped_receive():
198 with self.tracer.start_as_current_span(
199 span_name + " asgi." + scope["type"] + ".receive"
200 ) as receive_span:
201 message = await receive()
202 if receive_span.is_recording():
203 if message["type"] == "websocket.receive":
204 set_status_code(receive_span, 200)
205 receive_span.set_attribute("type", message["type"])
206 return message
207
208 @wraps(send)
209 async def wrapped_send(message):
210 with self.tracer.start_as_current_span(
211 span_name + " asgi." + scope["type"] + ".send"
212 ) as send_span:
213 if send_span.is_recording():
214 if message["type"] == "http.response.start":
215 status_code = message["status"]
216 set_status_code(send_span, status_code)
217 elif message["type"] == "websocket.send":
218 set_status_code(send_span, 200)
219 send_span.set_attribute("type", message["type"])
220 await send(message)
221
222 await self.app(scope, wrapped_receive, wrapped_send)
223 finally:
224 context.detach(token)
225
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py
@@ -47,6 +47,11 @@
else None.
"""
headers = carrier.get("headers")
+ if not headers:
+ return None
+
+ # asgi header keys are in lower case
+ key = key.lower()
decoded = [
_value.decode("utf8")
for (_key, _value) in headers
|
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py\n@@ -47,6 +47,11 @@\n else None.\n \"\"\"\n headers = carrier.get(\"headers\")\n+ if not headers:\n+ return None\n+\n+ # asgi header keys are in lower case\n+ key = key.lower()\n decoded = [\n _value.decode(\"utf8\")\n for (_key, _value) in headers\n", "issue": "Asgi request headers are not retrieved properly\nEnvironment: \r\nPython: python3.8\r\n\r\nfastapi==0.63.0\r\nopentelemetry-api==0.16b1\r\nopentelemetry-sdk==0.16b1\r\nopentelemetry-instrumentation-fastapi==0.16b1\r\nopentelemetry-exporter-google-cloud==0.16b1\r\nopentelemetry-tools-google-cloud==0.16b1\r\n\r\nWhen using `CloudTraceFormatPropagator` for [GCP](https://github.com/GoogleCloudPlatform/opentelemetry-operations-python), `X-Cloud-Trace-Context` header cannot be retrieved.\r\n\r\n**Steps to reproduce**\r\nDescribe exactly how to reproduce the error. Include a code sample if applicable.\r\n\r\n```\r\n# server.py\r\nimport uvicorn\r\nfrom fastapi import FastAPI, Request\r\nfrom opentelemetry import trace\r\nfrom opentelemetry.propagators import set_global_textmap\r\nfrom opentelemetry.instrumentation.fastapi import FastAPIInstrumentor\r\nfrom opentelemetry.sdk.trace import TracerProvider\r\nfrom opentelemetry.tools import cloud_trace_propagator\r\n\r\nset_global_textmap(cloud_trace_propagator.CloudTraceFormatPropagator())\r\n\r\napp = FastAPI()\r\n\r\ntracer = trace.get_tracer(\"test\")\r\nFastAPIInstrumentor.instrument_app(app)\r\n\r\[email protected](\"/trace\")\r\nasync def test(r: Request):\r\n with tracer.start_as_current_span(\"test\") as span:\r\n trace_id = span.get_span_context().trace_id\r\n print(f\"{trace_id:32x}\") # should print trace ID from `X-Cloud-Trace-Context` header value\r\n\r\nuvicorn.run(app)\r\n```\r\n\r\n```\r\n# client.py\r\nimport requests\r\n\r\nr = requests.Session()\r\nr.headers.setdefault(\"X-Cloud-Trace-Context\",\r\n \"f3ef5c2ede256aa77491057e600eca11/15104302039794794507;o=1\")\r\nr.get(\"http://localhost:8000/trace\")\r\n```\r\n\r\n**What is the expected behavior?**\r\nPrinted value should be `f3ef5c2ede256aa77491057e600eca11` based from the header sent\r\n\r\n**What is the actual behavior?**\r\nA newly generated value everything `/trace` is called\r\n\r\n**Additional context**\r\n`X-Cloud-Trace-Context` header value is not retrieved properly in `CloudTraceFormatPropagator`\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe opentelemetry-instrumentation-asgi package provides an ASGI middleware that can be used\non any ASGI framework (such as Django-channels 
/ Quart) to track requests\ntiming through OpenTelemetry.\n\"\"\"\n\nimport typing\nimport urllib\nfrom functools import wraps\nfrom typing import Tuple\n\nfrom asgiref.compatibility import guarantee_single_callable\n\nfrom opentelemetry import context, propagators, trace\nfrom opentelemetry.instrumentation.asgi.version import __version__ # noqa\nfrom opentelemetry.instrumentation.utils import http_status_to_status_code\nfrom opentelemetry.trace.propagation.textmap import DictGetter\nfrom opentelemetry.trace.status import Status, StatusCode\n\n\nclass CarrierGetter(DictGetter):\n def get(\n self, carrier: dict, key: str\n ) -> typing.Optional[typing.List[str]]:\n \"\"\"Getter implementation to retrieve a HTTP header value from the ASGI\n scope.\n\n Args:\n carrier: ASGI scope object\n key: header name in scope\n Returns:\n A list with a single string with the header value if it exists,\n else None.\n \"\"\"\n headers = carrier.get(\"headers\")\n decoded = [\n _value.decode(\"utf8\")\n for (_key, _value) in headers\n if _key.decode(\"utf8\") == key\n ]\n if not decoded:\n return None\n return decoded\n\n\ncarrier_getter = CarrierGetter()\n\n\ndef collect_request_attributes(scope):\n \"\"\"Collects HTTP request attributes from the ASGI scope and returns a\n dictionary to be used as span creation attributes.\"\"\"\n server_host, port, http_url = get_host_port_url_tuple(scope)\n query_string = scope.get(\"query_string\")\n if query_string and http_url:\n if isinstance(query_string, bytes):\n query_string = query_string.decode(\"utf8\")\n http_url = http_url + (\"?\" + urllib.parse.unquote(query_string))\n\n result = {\n \"http.scheme\": scope.get(\"scheme\"),\n \"http.host\": server_host,\n \"net.host.port\": port,\n \"http.flavor\": scope.get(\"http_version\"),\n \"http.target\": scope.get(\"path\"),\n \"http.url\": http_url,\n }\n http_method = scope.get(\"method\")\n if http_method:\n result[\"http.method\"] = http_method\n\n http_host_value_list = carrier_getter.get(scope, \"host\")\n if http_host_value_list:\n result[\"http.server_name\"] = \",\".join(http_host_value_list)\n http_user_agent = carrier_getter.get(scope, \"user-agent\")\n if http_user_agent:\n result[\"http.user_agent\"] = http_user_agent[0]\n\n if \"client\" in scope and scope[\"client\"] is not None:\n result[\"net.peer.ip\"] = scope.get(\"client\")[0]\n result[\"net.peer.port\"] = scope.get(\"client\")[1]\n\n # remove None values\n result = {k: v for k, v in result.items() if v is not None}\n\n return result\n\n\ndef get_host_port_url_tuple(scope):\n \"\"\"Returns (host, port, full_url) tuple.\n \"\"\"\n server = scope.get(\"server\") or [\"0.0.0.0\", 80]\n port = server[1]\n server_host = server[0] + (\":\" + str(port) if port != 80 else \"\")\n full_path = scope.get(\"root_path\", \"\") + scope.get(\"path\", \"\")\n http_url = scope.get(\"scheme\", \"http\") + \"://\" + server_host + full_path\n return server_host, port, http_url\n\n\ndef set_status_code(span, status_code):\n \"\"\"Adds HTTP response attributes to span using the status_code argument.\"\"\"\n if not span.is_recording():\n return\n try:\n status_code = int(status_code)\n except ValueError:\n span.set_status(\n Status(\n StatusCode.ERROR,\n \"Non-integer HTTP status: \" + repr(status_code),\n )\n )\n else:\n span.set_attribute(\"http.status_code\", status_code)\n span.set_status(Status(http_status_to_status_code(status_code)))\n\n\ndef get_default_span_details(scope: dict) -> Tuple[str, dict]:\n \"\"\"Default implementation for span_details_callback\n\n 
Args:\n scope: the asgi scope dictionary\n\n Returns:\n a tuple of the span, and any attributes to attach to the\n span.\n \"\"\"\n method_or_path = scope.get(\"method\") or scope.get(\"path\")\n\n return method_or_path, {}\n\n\nclass OpenTelemetryMiddleware:\n \"\"\"The ASGI application middleware.\n\n This class is an ASGI middleware that starts and annotates spans for any\n requests it is invoked with.\n\n Args:\n app: The ASGI application callable to forward requests to.\n span_details_callback: Callback which should return a string\n and a tuple, representing the desired span name and a\n dictionary with any additional span attributes to set.\n Optional: Defaults to get_default_span_details.\n \"\"\"\n\n def __init__(self, app, excluded_urls=None, span_details_callback=None):\n self.app = guarantee_single_callable(app)\n self.tracer = trace.get_tracer(__name__, __version__)\n self.span_details_callback = (\n span_details_callback or get_default_span_details\n )\n self.excluded_urls = excluded_urls\n\n async def __call__(self, scope, receive, send):\n \"\"\"The ASGI application\n\n Args:\n scope: A ASGI environment.\n receive: An awaitable callable yielding dictionaries\n send: An awaitable callable taking a single dictionary as argument.\n \"\"\"\n if scope[\"type\"] not in (\"http\", \"websocket\"):\n return await self.app(scope, receive, send)\n\n _, _, url = get_host_port_url_tuple(scope)\n if self.excluded_urls and self.excluded_urls.url_disabled(url):\n return await self.app(scope, receive, send)\n\n token = context.attach(propagators.extract(carrier_getter, scope))\n span_name, additional_attributes = self.span_details_callback(scope)\n\n try:\n with self.tracer.start_as_current_span(\n span_name + \" asgi\", kind=trace.SpanKind.SERVER,\n ) as span:\n if span.is_recording():\n attributes = collect_request_attributes(scope)\n attributes.update(additional_attributes)\n for key, value in attributes.items():\n span.set_attribute(key, value)\n\n @wraps(receive)\n async def wrapped_receive():\n with self.tracer.start_as_current_span(\n span_name + \" asgi.\" + scope[\"type\"] + \".receive\"\n ) as receive_span:\n message = await receive()\n if receive_span.is_recording():\n if message[\"type\"] == \"websocket.receive\":\n set_status_code(receive_span, 200)\n receive_span.set_attribute(\"type\", message[\"type\"])\n return message\n\n @wraps(send)\n async def wrapped_send(message):\n with self.tracer.start_as_current_span(\n span_name + \" asgi.\" + scope[\"type\"] + \".send\"\n ) as send_span:\n if send_span.is_recording():\n if message[\"type\"] == \"http.response.start\":\n status_code = message[\"status\"]\n set_status_code(send_span, status_code)\n elif message[\"type\"] == \"websocket.send\":\n set_status_code(send_span, 200)\n send_span.set_attribute(\"type\", message[\"type\"])\n await send(message)\n\n await self.app(scope, wrapped_receive, wrapped_send)\n finally:\n context.detach(token)\n", "path": "instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS 
OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe opentelemetry-instrumentation-asgi package provides an ASGI middleware that can be used\non any ASGI framework (such as Django-channels / Quart) to track requests\ntiming through OpenTelemetry.\n\"\"\"\n\nimport typing\nimport urllib\nfrom functools import wraps\nfrom typing import Tuple\n\nfrom asgiref.compatibility import guarantee_single_callable\n\nfrom opentelemetry import context, propagators, trace\nfrom opentelemetry.instrumentation.asgi.version import __version__ # noqa\nfrom opentelemetry.instrumentation.utils import http_status_to_status_code\nfrom opentelemetry.trace.propagation.textmap import DictGetter\nfrom opentelemetry.trace.status import Status, StatusCode\n\n\nclass CarrierGetter(DictGetter):\n def get(\n self, carrier: dict, key: str\n ) -> typing.Optional[typing.List[str]]:\n \"\"\"Getter implementation to retrieve a HTTP header value from the ASGI\n scope.\n\n Args:\n carrier: ASGI scope object\n key: header name in scope\n Returns:\n A list with a single string with the header value if it exists,\n else None.\n \"\"\"\n headers = carrier.get(\"headers\")\n if not headers:\n return None\n\n # asgi header keys are in lower case\n key = key.lower()\n decoded = [\n _value.decode(\"utf8\")\n for (_key, _value) in headers\n if _key.decode(\"utf8\") == key\n ]\n if not decoded:\n return None\n return decoded\n\n\ncarrier_getter = CarrierGetter()\n\n\ndef collect_request_attributes(scope):\n \"\"\"Collects HTTP request attributes from the ASGI scope and returns a\n dictionary to be used as span creation attributes.\"\"\"\n server_host, port, http_url = get_host_port_url_tuple(scope)\n query_string = scope.get(\"query_string\")\n if query_string and http_url:\n if isinstance(query_string, bytes):\n query_string = query_string.decode(\"utf8\")\n http_url = http_url + (\"?\" + urllib.parse.unquote(query_string))\n\n result = {\n \"http.scheme\": scope.get(\"scheme\"),\n \"http.host\": server_host,\n \"net.host.port\": port,\n \"http.flavor\": scope.get(\"http_version\"),\n \"http.target\": scope.get(\"path\"),\n \"http.url\": http_url,\n }\n http_method = scope.get(\"method\")\n if http_method:\n result[\"http.method\"] = http_method\n\n http_host_value_list = carrier_getter.get(scope, \"host\")\n if http_host_value_list:\n result[\"http.server_name\"] = \",\".join(http_host_value_list)\n http_user_agent = carrier_getter.get(scope, \"user-agent\")\n if http_user_agent:\n result[\"http.user_agent\"] = http_user_agent[0]\n\n if \"client\" in scope and scope[\"client\"] is not None:\n result[\"net.peer.ip\"] = scope.get(\"client\")[0]\n result[\"net.peer.port\"] = scope.get(\"client\")[1]\n\n # remove None values\n result = {k: v for k, v in result.items() if v is not None}\n\n return result\n\n\ndef get_host_port_url_tuple(scope):\n \"\"\"Returns (host, port, full_url) tuple.\n \"\"\"\n server = scope.get(\"server\") or [\"0.0.0.0\", 80]\n port = server[1]\n server_host = server[0] + (\":\" + str(port) if port != 80 else \"\")\n full_path = scope.get(\"root_path\", \"\") + scope.get(\"path\", \"\")\n http_url = scope.get(\"scheme\", \"http\") + \"://\" + server_host + full_path\n return server_host, port, http_url\n\n\ndef set_status_code(span, status_code):\n \"\"\"Adds HTTP response attributes to span using the status_code argument.\"\"\"\n if not span.is_recording():\n return\n try:\n status_code = int(status_code)\n 
except ValueError:\n span.set_status(\n Status(\n StatusCode.ERROR,\n \"Non-integer HTTP status: \" + repr(status_code),\n )\n )\n else:\n span.set_attribute(\"http.status_code\", status_code)\n span.set_status(Status(http_status_to_status_code(status_code)))\n\n\ndef get_default_span_details(scope: dict) -> Tuple[str, dict]:\n \"\"\"Default implementation for span_details_callback\n\n Args:\n scope: the asgi scope dictionary\n\n Returns:\n a tuple of the span, and any attributes to attach to the\n span.\n \"\"\"\n method_or_path = scope.get(\"method\") or scope.get(\"path\")\n\n return method_or_path, {}\n\n\nclass OpenTelemetryMiddleware:\n \"\"\"The ASGI application middleware.\n\n This class is an ASGI middleware that starts and annotates spans for any\n requests it is invoked with.\n\n Args:\n app: The ASGI application callable to forward requests to.\n span_details_callback: Callback which should return a string\n and a tuple, representing the desired span name and a\n dictionary with any additional span attributes to set.\n Optional: Defaults to get_default_span_details.\n \"\"\"\n\n def __init__(self, app, excluded_urls=None, span_details_callback=None):\n self.app = guarantee_single_callable(app)\n self.tracer = trace.get_tracer(__name__, __version__)\n self.span_details_callback = (\n span_details_callback or get_default_span_details\n )\n self.excluded_urls = excluded_urls\n\n async def __call__(self, scope, receive, send):\n \"\"\"The ASGI application\n\n Args:\n scope: A ASGI environment.\n receive: An awaitable callable yielding dictionaries\n send: An awaitable callable taking a single dictionary as argument.\n \"\"\"\n if scope[\"type\"] not in (\"http\", \"websocket\"):\n return await self.app(scope, receive, send)\n\n _, _, url = get_host_port_url_tuple(scope)\n if self.excluded_urls and self.excluded_urls.url_disabled(url):\n return await self.app(scope, receive, send)\n\n token = context.attach(propagators.extract(carrier_getter, scope))\n span_name, additional_attributes = self.span_details_callback(scope)\n\n try:\n with self.tracer.start_as_current_span(\n span_name + \" asgi\", kind=trace.SpanKind.SERVER,\n ) as span:\n if span.is_recording():\n attributes = collect_request_attributes(scope)\n attributes.update(additional_attributes)\n for key, value in attributes.items():\n span.set_attribute(key, value)\n\n @wraps(receive)\n async def wrapped_receive():\n with self.tracer.start_as_current_span(\n span_name + \" asgi.\" + scope[\"type\"] + \".receive\"\n ) as receive_span:\n message = await receive()\n if receive_span.is_recording():\n if message[\"type\"] == \"websocket.receive\":\n set_status_code(receive_span, 200)\n receive_span.set_attribute(\"type\", message[\"type\"])\n return message\n\n @wraps(send)\n async def wrapped_send(message):\n with self.tracer.start_as_current_span(\n span_name + \" asgi.\" + scope[\"type\"] + \".send\"\n ) as send_span:\n if send_span.is_recording():\n if message[\"type\"] == \"http.response.start\":\n status_code = message[\"status\"]\n set_status_code(send_span, status_code)\n elif message[\"type\"] == \"websocket.send\":\n set_status_code(send_span, 200)\n send_span.set_attribute(\"type\", message[\"type\"])\n await send(message)\n\n await self.app(scope, wrapped_receive, wrapped_send)\n finally:\n context.detach(token)\n", "path": "instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py"}]}
| 3,140 | 189 |
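Editorial note on the ASGI record that closes above: the patched middleware wraps an ASGI callable and emits a "<method> asgi" SERVER span plus child spans around the wrapped receive/send callables. A minimal wiring sketch follows; only the middleware import comes from the record, while the tiny HTTP-only app and variable names are illustrative assumptions.

```python
# Hedged sketch: wrapping a bare ASGI app with the middleware from the
# record above. The echo-style app below is an assumption for illustration.
from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware


async def app(scope, receive, send):
    # Respond to plain HTTP requests with a fixed body; ignore other scope types.
    if scope["type"] != "http":
        return
    await send({"type": "http.response.start", "status": 200, "headers": []})
    await send({"type": "http.response.body", "body": b"hello"})


# Each request through `instrumented` gets a SERVER span, with child spans
# created around the wrapped receive and send callables, as in the record.
instrumented = OpenTelemetryMiddleware(app)
```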
gh_patches_debug_7433
|
rasdani/github-patches
|
git_diff
|
SciTools__cartopy-439
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: Geostationary example bug
```
python cartopy/docs/source/examples/geostationary.py
Traceback (most recent call last):
File "/net/home/h05/cpelley/git/cartopy/docs/source/examples/geostationary.py", line 60, in <module>
main()
File "/net/home/h05/cpelley/git/cartopy/docs/source/examples/geostationary.py", line 54, in main
img, crs, extent, origin = geos_image()
File "/net/home/h05/cpelley/git/cartopy/docs/source/examples/geostationary.py", line 43, in geos_image
img_handle = BytesIO(urllib2.urlopen(url).read())
NameError: global name 'urllib2' is not defined
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/cartopy/examples/geostationary.py`
Content:
```
1 """
2 Reprojecting images from a Geostationary projection
3 ---------------------------------------------------
4
5 This example demonstrates Cartopy's ability to project images into the desired
6 projection on-the-fly. The image itself is retrieved from a URL and is loaded
7 directly into memory without storing it intermediately into a file. It
8 represents pre-processed data from Moderate-Resolution Imaging
9 Spectroradiometer (MODIS) which has been put into an image in the data's
10 native Geostationary coordinate system - it is then projected by cartopy
11 into a global Miller map.
12
13 """
14 __tags__ = ["Scalar data"]
15 try:
16 from urllib2 import urlopen
17 except ImportError:
18 from urllib.request import urlopen
19 from io import BytesIO
20
21 import cartopy.crs as ccrs
22 import matplotlib.pyplot as plt
23
24
25 def geos_image():
26 """
27 Return a specific MODIS image by retrieving it from a github gist URL.
28
29 Returns
30 -------
31 img : numpy array
32 The pixels of the image in a numpy array.
33 img_proj : cartopy CRS
34 The rectangular coordinate system of the image.
35 img_extent : tuple of floats
36 The extent of the image ``(x0, y0, x1, y1)`` referenced in
37 the ``img_proj`` coordinate system.
38 origin : str
39 The origin of the image to be passed through to matplotlib's imshow.
40
41 """
42 url = ('https://gist.github.com/pelson/5871263/raw/'
43 'EIDA50_201211061300_clip2.png')
44 img_handle = BytesIO(urllib2.urlopen(url).read())
45 img = plt.imread(img_handle)
46 img_proj = ccrs.Geostationary(satellite_height=35786000)
47 img_extent = (-5500000, 5500000, -5500000, 5500000)
48 return img, img_proj, img_extent, 'upper'
49
50
51 def main():
52 ax = plt.axes(projection=ccrs.Miller())
53 ax.coastlines()
54 ax.set_global()
55 img, crs, extent, origin = geos_image()
56 plt.imshow(img, transform=crs, extent=extent, origin=origin, cmap='gray')
57 plt.show()
58
59
60 if __name__ == '__main__':
61 main()
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/cartopy/examples/geostationary.py b/lib/cartopy/examples/geostationary.py
--- a/lib/cartopy/examples/geostationary.py
+++ b/lib/cartopy/examples/geostationary.py
@@ -41,7 +41,7 @@
"""
url = ('https://gist.github.com/pelson/5871263/raw/'
'EIDA50_201211061300_clip2.png')
- img_handle = BytesIO(urllib2.urlopen(url).read())
+ img_handle = BytesIO(urlopen(url).read())
img = plt.imread(img_handle)
img_proj = ccrs.Geostationary(satellite_height=35786000)
img_extent = (-5500000, 5500000, -5500000, 5500000)
|
{"golden_diff": "diff --git a/lib/cartopy/examples/geostationary.py b/lib/cartopy/examples/geostationary.py\n--- a/lib/cartopy/examples/geostationary.py\n+++ b/lib/cartopy/examples/geostationary.py\n@@ -41,7 +41,7 @@\n \"\"\"\n url = ('https://gist.github.com/pelson/5871263/raw/'\n 'EIDA50_201211061300_clip2.png')\n- img_handle = BytesIO(urllib2.urlopen(url).read())\n+ img_handle = BytesIO(urlopen(url).read())\n img = plt.imread(img_handle)\n img_proj = ccrs.Geostationary(satellite_height=35786000)\n img_extent = (-5500000, 5500000, -5500000, 5500000)\n", "issue": "BUG: Geostationary example bug\n```\npython cartopy/docs/source/examples/geostationary.py\nTraceback (most recent call last):\n File \"/net/home/h05/cpelley/git/cartopy/docs/source/examples/geostationary.py\", line 60, in <module>\n main()\n File \"/net/home/h05/cpelley/git/cartopy/docs/source/examples/geostationary.py\", line 54, in main\n img, crs, extent, origin = geos_image()\n File \"/net/home/h05/cpelley/git/cartopy/docs/source/examples/geostationary.py\", line 43, in geos_image\n img_handle = BytesIO(urllib2.urlopen(url).read())\nNameError: global name 'urllib2' is not defined\n```\n\n", "before_files": [{"content": "\"\"\"\nReprojecting images from a Geostationary projection\n---------------------------------------------------\n\nThis example demonstrates Cartopy's ability to project images into the desired\nprojection on-the-fly. The image itself is retrieved from a URL and is loaded\ndirectly into memory without storing it intermediately into a file. It\nrepresents pre-processed data from Moderate-Resolution Imaging\nSpectroradiometer (MODIS) which has been put into an image in the data's\nnative Geostationary coordinate system - it is then projected by cartopy\ninto a global Miller map.\n\n\"\"\"\n__tags__ = [\"Scalar data\"]\ntry:\n from urllib2 import urlopen\nexcept ImportError:\n from urllib.request import urlopen\nfrom io import BytesIO\n\nimport cartopy.crs as ccrs\nimport matplotlib.pyplot as plt\n\n\ndef geos_image():\n \"\"\"\n Return a specific MODIS image by retrieving it from a github gist URL.\n\n Returns\n -------\n img : numpy array\n The pixels of the image in a numpy array.\n img_proj : cartopy CRS\n The rectangular coordinate system of the image.\n img_extent : tuple of floats\n The extent of the image ``(x0, y0, x1, y1)`` referenced in\n the ``img_proj`` coordinate system.\n origin : str\n The origin of the image to be passed through to matplotlib's imshow.\n\n \"\"\"\n url = ('https://gist.github.com/pelson/5871263/raw/'\n 'EIDA50_201211061300_clip2.png')\n img_handle = BytesIO(urllib2.urlopen(url).read())\n img = plt.imread(img_handle)\n img_proj = ccrs.Geostationary(satellite_height=35786000)\n img_extent = (-5500000, 5500000, -5500000, 5500000)\n return img, img_proj, img_extent, 'upper'\n\n\ndef main():\n ax = plt.axes(projection=ccrs.Miller())\n ax.coastlines()\n ax.set_global()\n img, crs, extent, origin = geos_image()\n plt.imshow(img, transform=crs, extent=extent, origin=origin, cmap='gray')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/cartopy/examples/geostationary.py"}], "after_files": [{"content": "\"\"\"\nReprojecting images from a Geostationary projection\n---------------------------------------------------\n\nThis example demonstrates Cartopy's ability to project images into the desired\nprojection on-the-fly. The image itself is retrieved from a URL and is loaded\ndirectly into memory without storing it intermediately into a file. 
It\nrepresents pre-processed data from Moderate-Resolution Imaging\nSpectroradiometer (MODIS) which has been put into an image in the data's\nnative Geostationary coordinate system - it is then projected by cartopy\ninto a global Miller map.\n\n\"\"\"\n__tags__ = [\"Scalar data\"]\ntry:\n from urllib2 import urlopen\nexcept ImportError:\n from urllib.request import urlopen\nfrom io import BytesIO\n\nimport cartopy.crs as ccrs\nimport matplotlib.pyplot as plt\n\n\ndef geos_image():\n \"\"\"\n Return a specific MODIS image by retrieving it from a github gist URL.\n\n Returns\n -------\n img : numpy array\n The pixels of the image in a numpy array.\n img_proj : cartopy CRS\n The rectangular coordinate system of the image.\n img_extent : tuple of floats\n The extent of the image ``(x0, y0, x1, y1)`` referenced in\n the ``img_proj`` coordinate system.\n origin : str\n The origin of the image to be passed through to matplotlib's imshow.\n\n \"\"\"\n url = ('https://gist.github.com/pelson/5871263/raw/'\n 'EIDA50_201211061300_clip2.png')\n img_handle = BytesIO(urlopen(url).read())\n img = plt.imread(img_handle)\n img_proj = ccrs.Geostationary(satellite_height=35786000)\n img_extent = (-5500000, 5500000, -5500000, 5500000)\n return img, img_proj, img_extent, 'upper'\n\n\ndef main():\n ax = plt.axes(projection=ccrs.Miller())\n ax.coastlines()\n ax.set_global()\n img, crs, extent, origin = geos_image()\n plt.imshow(img, transform=crs, extent=extent, origin=origin, cmap='gray')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/cartopy/examples/geostationary.py"}]}
| 1,075 | 207 |
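Editorial note on the cartopy record above: the patch only switches the call site to the `urlopen` name that the example already imports at the top of the file, so the same code runs on Python 2 and 3. A stand-alone sketch of that compatibility pattern follows; the helper name is an assumption and no URL from the record is reused.

```python
# Hedged sketch of the Python 2/3 import pattern the example relies on:
# bind `urlopen` once at import time, then call the bound name directly
# instead of referencing the urllib2 module (which Python 3 does not have).
try:
    from urllib2 import urlopen  # Python 2
except ImportError:
    from urllib.request import urlopen  # Python 3

from io import BytesIO


def fetch_image_bytes(url):
    # Placeholder helper; the caller supplies the image URL.
    return BytesIO(urlopen(url).read())
```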
gh_patches_debug_50456
|
rasdani/github-patches
|
git_diff
|
jupyterhub__jupyterhub-263
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Single user server launch is broken
I think that #261 broke the launching of the single user server. I am seeing the following errors in the nbgrader tests:
```
Traceback (most recent call last):
File "/Users/jhamrick/.virtualenvs/nbgrader/bin/jupyterhub-singleuser", line 6, in <module>
exec(compile(open(__file__).read(), __file__, 'exec'))
File "/Users/jhamrick/project/tools/jupyterhub/scripts/jupyterhub-singleuser", line 4, in <module>
main()
File "/Users/jhamrick/project/tools/jupyterhub/jupyterhub/singleuser.py", line 221, in main
return SingleUserNotebookApp.launch_instance()
File "/Users/jhamrick/.virtualenvs/nbgrader/lib/python3.4/site-packages/IPython/config/application.py", line 573, in launch_instance
app.initialize(argv)
File "<string>", line 2, in initialize
File "/Users/jhamrick/.virtualenvs/nbgrader/lib/python3.4/site-packages/IPython/config/application.py", line 75, in catch_config_error
return method(app, *args, **kwargs)
File "/Users/jhamrick/.virtualenvs/nbgrader/lib/python3.4/site-packages/IPython/html/notebookapp.py", line 1015, in initialize
self.init_webapp()
File "/Users/jhamrick/project/tools/jupyterhub/jupyterhub/singleuser.py", line 191, in init_webapp
s['user'] = self.user
File "/Users/jhamrick/.virtualenvs/nbgrader/lib/python3.4/site-packages/traitlets/traitlets.py", line 438, in __get__
% (self.name, obj))
traitlets.traitlets.TraitError: No default value found for None trait of <jupyterhub.singleuser.SingleUserNotebookApp object at 0x102953b00>
```
If I revert to the version of jupyterhub prior to that PR, this error does not occur. @epifanio reported on gitter seeing the same thing as well, so I don't think it's isolated to nbgrader.
Given the error message, I suspect this has to do with ipython/traitlets#39 and/or ipython/traitlets#40 though I haven't actually tested it. I tried giving the `user` trait a default value but it did not seem to fix the error. I will try to do a bit more debugging, but I fear I don't really understand the internals of traitlets well enough to know exactly what's going on here.
Ping @takluyver and @minrk ?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jupyterhub/singleuser.py`
Content:
```
1 #!/usr/bin/env python3
2 """Extend regular notebook server to be aware of multiuser things."""
3
4 # Copyright (c) Jupyter Development Team.
5 # Distributed under the terms of the Modified BSD License.
6
7 import os
8 try:
9 from urllib.parse import quote
10 except ImportError:
11 # PY2 Compat
12 from urllib import quote
13
14 import requests
15 from jinja2 import ChoiceLoader, FunctionLoader
16
17 from tornado import ioloop
18 from tornado.web import HTTPError
19
20 from traitlets import (
21 Integer,
22 Unicode,
23 CUnicode,
24 )
25
26 from IPython.html.notebookapp import NotebookApp, aliases as notebook_aliases
27 from IPython.html.auth.login import LoginHandler
28 from IPython.html.auth.logout import LogoutHandler
29
30 from IPython.html.utils import url_path_join
31
32
33 from distutils.version import LooseVersion as V
34
35 import IPython
36 if V(IPython.__version__) < V('3.0'):
37 raise ImportError("JupyterHub Requires IPython >= 3.0, found %s" % IPython.__version__)
38
39 # Define two methods to attach to AuthenticatedHandler,
40 # which authenticate via the central auth server.
41
42 class JupyterHubLoginHandler(LoginHandler):
43 @staticmethod
44 def login_available(settings):
45 return True
46
47 @staticmethod
48 def verify_token(self, cookie_name, encrypted_cookie):
49 """method for token verification"""
50 cookie_cache = self.settings['cookie_cache']
51 if encrypted_cookie in cookie_cache:
52 # we've seen this token before, don't ask upstream again
53 return cookie_cache[encrypted_cookie]
54
55 hub_api_url = self.settings['hub_api_url']
56 hub_api_key = self.settings['hub_api_key']
57 r = requests.get(url_path_join(
58 hub_api_url, "authorizations/cookie", cookie_name, quote(encrypted_cookie, safe=''),
59 ),
60 headers = {'Authorization' : 'token %s' % hub_api_key},
61 )
62 if r.status_code == 404:
63 data = None
64 elif r.status_code == 403:
65 self.log.error("I don't have permission to verify cookies, my auth token may have expired: [%i] %s", r.status_code, r.reason)
66 raise HTTPError(500, "Permission failure checking authorization, I may need to be restarted")
67 elif r.status_code >= 500:
68 self.log.error("Upstream failure verifying auth token: [%i] %s", r.status_code, r.reason)
69 raise HTTPError(502, "Failed to check authorization (upstream problem)")
70 elif r.status_code >= 400:
71 self.log.warn("Failed to check authorization: [%i] %s", r.status_code, r.reason)
72 raise HTTPError(500, "Failed to check authorization")
73 else:
74 data = r.json()
75 cookie_cache[encrypted_cookie] = data
76 return data
77
78 @staticmethod
79 def get_user(self):
80 """alternative get_current_user to query the central server"""
81 # only allow this to be called once per handler
82 # avoids issues if an error is raised,
83 # since this may be called again when trying to render the error page
84 if hasattr(self, '_cached_user'):
85 return self._cached_user
86
87 self._cached_user = None
88 my_user = self.settings['user']
89 encrypted_cookie = self.get_cookie(self.cookie_name)
90 if encrypted_cookie:
91 auth_data = JupyterHubLoginHandler.verify_token(self, self.cookie_name, encrypted_cookie)
92 if not auth_data:
93 # treat invalid token the same as no token
94 return None
95 user = auth_data['name']
96 if user == my_user:
97 self._cached_user = user
98 return user
99 else:
100 return None
101 else:
102 self.log.debug("No token cookie")
103 return None
104
105
106 class JupyterHubLogoutHandler(LogoutHandler):
107 def get(self):
108 self.redirect(url_path_join(self.settings['hub_prefix'], 'logout'))
109
110
111 # register new hub related command-line aliases
112 aliases = dict(notebook_aliases)
113 aliases.update({
114 'user' : 'SingleUserNotebookApp.user',
115 'cookie-name': 'SingleUserNotebookApp.cookie_name',
116 'hub-prefix': 'SingleUserNotebookApp.hub_prefix',
117 'hub-api-url': 'SingleUserNotebookApp.hub_api_url',
118 'base-url': 'SingleUserNotebookApp.base_url',
119 })
120
121 page_template = """
122 {% extends "templates/page.html" %}
123
124 {% block header_buttons %}
125 {{super()}}
126
127 <a href='{{hub_control_panel_url}}'
128 class='btn btn-default btn-sm navbar-btn pull-right'
129 style='margin-right: 4px; margin-left: 2px;'
130 >
131 Control Panel</a>
132 {% endblock %}
133 """
134
135 class SingleUserNotebookApp(NotebookApp):
136 """A Subclass of the regular NotebookApp that is aware of the parent multiuser context."""
137 user = CUnicode(config=True)
138 def _user_changed(self, name, old, new):
139 self.log.name = new
140 cookie_name = Unicode(config=True)
141 hub_prefix = Unicode(config=True)
142 hub_api_url = Unicode(config=True)
143 aliases = aliases
144 open_browser = False
145 trust_xheaders = True
146 login_handler_class = JupyterHubLoginHandler
147 logout_handler_class = JupyterHubLogoutHandler
148
149 cookie_cache_lifetime = Integer(
150 config=True,
151 default_value=300,
152 allow_none=True,
153 help="""
154 Time, in seconds, that we cache a validated cookie before requiring
155 revalidation with the hub.
156 """,
157 )
158
159 def _log_datefmt_default(self):
160 """Exclude date from default date format"""
161 return "%Y-%m-%d %H:%M:%S"
162
163 def _log_format_default(self):
164 """override default log format to include time"""
165 return "%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s %(module)s:%(lineno)d]%(end_color)s %(message)s"
166
167 def _confirm_exit(self):
168 # disable the exit confirmation for background notebook processes
169 ioloop.IOLoop.instance().stop()
170
171 def _clear_cookie_cache(self):
172 self.log.debug("Clearing cookie cache")
173 self.tornado_settings['cookie_cache'].clear()
174
175 def start(self):
176 # Start a PeriodicCallback to clear cached cookies. This forces us to
177 # revalidate our user with the Hub at least every
178 # `cookie_cache_lifetime` seconds.
179 if self.cookie_cache_lifetime:
180 ioloop.PeriodicCallback(
181 self._clear_cookie_cache,
182 self.cookie_cache_lifetime * 1e3,
183 ).start()
184 super(SingleUserNotebookApp, self).start()
185
186 def init_webapp(self):
187 # load the hub related settings into the tornado settings dict
188 env = os.environ
189 s = self.tornado_settings
190 s['cookie_cache'] = {}
191 s['user'] = self.user
192 s['hub_api_key'] = env.pop('JPY_API_TOKEN')
193 s['hub_prefix'] = self.hub_prefix
194 s['cookie_name'] = self.cookie_name
195 s['login_url'] = self.hub_prefix
196 s['hub_api_url'] = self.hub_api_url
197
198 super(SingleUserNotebookApp, self).init_webapp()
199 self.patch_templates()
200
201 def patch_templates(self):
202 """Patch page templates to add Hub-related buttons"""
203 env = self.web_app.settings['jinja2_env']
204
205 env.globals['hub_control_panel_url'] = \
206 url_path_join(self.hub_prefix, 'home')
207
208 # patch jinja env loading to modify page template
209 def get_page(name):
210 if name == 'page.html':
211 return page_template
212
213 orig_loader = env.loader
214 env.loader = ChoiceLoader([
215 FunctionLoader(get_page),
216 orig_loader,
217 ])
218
219
220 def main():
221 return SingleUserNotebookApp.launch_instance()
222
223
224 if __name__ == "__main__":
225 main()
226
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/jupyterhub/singleuser.py b/jupyterhub/singleuser.py
--- a/jupyterhub/singleuser.py
+++ b/jupyterhub/singleuser.py
@@ -17,7 +17,7 @@
from tornado import ioloop
from tornado.web import HTTPError
-from traitlets import (
+from IPython.utils.traitlets import (
Integer,
Unicode,
CUnicode,
|
{"golden_diff": "diff --git a/jupyterhub/singleuser.py b/jupyterhub/singleuser.py\n--- a/jupyterhub/singleuser.py\n+++ b/jupyterhub/singleuser.py\n@@ -17,7 +17,7 @@\n from tornado import ioloop\n from tornado.web import HTTPError\n \n-from traitlets import (\n+from IPython.utils.traitlets import (\n Integer,\n Unicode,\n CUnicode,\n", "issue": "Single user server launch is broken\nI think that #261 broke the launching of the single user server. I am seeing the following errors in the nbgrader tests:\n\n```\nTraceback (most recent call last):\n File \"/Users/jhamrick/.virtualenvs/nbgrader/bin/jupyterhub-singleuser\", line 6, in <module>\n exec(compile(open(__file__).read(), __file__, 'exec'))\n File \"/Users/jhamrick/project/tools/jupyterhub/scripts/jupyterhub-singleuser\", line 4, in <module>\n main()\n File \"/Users/jhamrick/project/tools/jupyterhub/jupyterhub/singleuser.py\", line 221, in main\n return SingleUserNotebookApp.launch_instance()\n File \"/Users/jhamrick/.virtualenvs/nbgrader/lib/python3.4/site-packages/IPython/config/application.py\", line 573, in launch_instance\n app.initialize(argv)\n File \"<string>\", line 2, in initialize\n File \"/Users/jhamrick/.virtualenvs/nbgrader/lib/python3.4/site-packages/IPython/config/application.py\", line 75, in catch_config_error\n return method(app, *args, **kwargs)\n File \"/Users/jhamrick/.virtualenvs/nbgrader/lib/python3.4/site-packages/IPython/html/notebookapp.py\", line 1015, in initialize\n self.init_webapp()\n File \"/Users/jhamrick/project/tools/jupyterhub/jupyterhub/singleuser.py\", line 191, in init_webapp\n s['user'] = self.user\n File \"/Users/jhamrick/.virtualenvs/nbgrader/lib/python3.4/site-packages/traitlets/traitlets.py\", line 438, in __get__\n % (self.name, obj))\ntraitlets.traitlets.TraitError: No default value found for None trait of <jupyterhub.singleuser.SingleUserNotebookApp object at 0x102953b00>\n```\n\nIf I revert to the version of jupyterhub prior to that PR, this error does not occur. @epifanio reported on gitter seeing the same thing as well, so I don't think it's isolated to nbgrader.\n\nGiven the error message, I suspect this has to do with ipython/traitlets#39 and/or ipython/traitlets#40 though I haven't actually tested it. I tried giving the `user` trait a default value but it did not seem to fix the error. 
I will try to do a bit more debugging, but I fear I don't really understand the internals of traitlets well enough to know exactly what's going on here.\n\nPing @takluyver and @minrk ?\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\"\"\"Extend regular notebook server to be aware of multiuser things.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport os\ntry:\n from urllib.parse import quote\nexcept ImportError:\n # PY2 Compat\n from urllib import quote\n\nimport requests\nfrom jinja2 import ChoiceLoader, FunctionLoader\n\nfrom tornado import ioloop\nfrom tornado.web import HTTPError\n\nfrom traitlets import (\n Integer,\n Unicode,\n CUnicode,\n)\n\nfrom IPython.html.notebookapp import NotebookApp, aliases as notebook_aliases\nfrom IPython.html.auth.login import LoginHandler\nfrom IPython.html.auth.logout import LogoutHandler\n\nfrom IPython.html.utils import url_path_join\n\n\nfrom distutils.version import LooseVersion as V\n\nimport IPython\nif V(IPython.__version__) < V('3.0'):\n raise ImportError(\"JupyterHub Requires IPython >= 3.0, found %s\" % IPython.__version__)\n\n# Define two methods to attach to AuthenticatedHandler,\n# which authenticate via the central auth server.\n\nclass JupyterHubLoginHandler(LoginHandler):\n @staticmethod\n def login_available(settings):\n return True\n \n @staticmethod\n def verify_token(self, cookie_name, encrypted_cookie):\n \"\"\"method for token verification\"\"\"\n cookie_cache = self.settings['cookie_cache']\n if encrypted_cookie in cookie_cache:\n # we've seen this token before, don't ask upstream again\n return cookie_cache[encrypted_cookie]\n \n hub_api_url = self.settings['hub_api_url']\n hub_api_key = self.settings['hub_api_key']\n r = requests.get(url_path_join(\n hub_api_url, \"authorizations/cookie\", cookie_name, quote(encrypted_cookie, safe=''),\n ),\n headers = {'Authorization' : 'token %s' % hub_api_key},\n )\n if r.status_code == 404:\n data = None\n elif r.status_code == 403:\n self.log.error(\"I don't have permission to verify cookies, my auth token may have expired: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(500, \"Permission failure checking authorization, I may need to be restarted\")\n elif r.status_code >= 500:\n self.log.error(\"Upstream failure verifying auth token: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(502, \"Failed to check authorization (upstream problem)\")\n elif r.status_code >= 400:\n self.log.warn(\"Failed to check authorization: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(500, \"Failed to check authorization\")\n else:\n data = r.json()\n cookie_cache[encrypted_cookie] = data\n return data\n \n @staticmethod\n def get_user(self):\n \"\"\"alternative get_current_user to query the central server\"\"\"\n # only allow this to be called once per handler\n # avoids issues if an error is raised,\n # since this may be called again when trying to render the error page\n if hasattr(self, '_cached_user'):\n return self._cached_user\n \n self._cached_user = None\n my_user = self.settings['user']\n encrypted_cookie = self.get_cookie(self.cookie_name)\n if encrypted_cookie:\n auth_data = JupyterHubLoginHandler.verify_token(self, self.cookie_name, encrypted_cookie)\n if not auth_data:\n # treat invalid token the same as no token\n return None\n user = auth_data['name']\n if user == my_user:\n self._cached_user = user\n return user\n else:\n return None\n else:\n self.log.debug(\"No token cookie\")\n return 
None\n\n\nclass JupyterHubLogoutHandler(LogoutHandler):\n def get(self):\n self.redirect(url_path_join(self.settings['hub_prefix'], 'logout'))\n\n\n# register new hub related command-line aliases\naliases = dict(notebook_aliases)\naliases.update({\n 'user' : 'SingleUserNotebookApp.user',\n 'cookie-name': 'SingleUserNotebookApp.cookie_name',\n 'hub-prefix': 'SingleUserNotebookApp.hub_prefix',\n 'hub-api-url': 'SingleUserNotebookApp.hub_api_url',\n 'base-url': 'SingleUserNotebookApp.base_url',\n})\n\npage_template = \"\"\"\n{% extends \"templates/page.html\" %}\n\n{% block header_buttons %}\n{{super()}}\n\n<a href='{{hub_control_panel_url}}'\n class='btn btn-default btn-sm navbar-btn pull-right'\n style='margin-right: 4px; margin-left: 2px;'\n>\nControl Panel</a>\n{% endblock %}\n\"\"\"\n\nclass SingleUserNotebookApp(NotebookApp):\n \"\"\"A Subclass of the regular NotebookApp that is aware of the parent multiuser context.\"\"\"\n user = CUnicode(config=True)\n def _user_changed(self, name, old, new):\n self.log.name = new\n cookie_name = Unicode(config=True)\n hub_prefix = Unicode(config=True)\n hub_api_url = Unicode(config=True)\n aliases = aliases\n open_browser = False\n trust_xheaders = True\n login_handler_class = JupyterHubLoginHandler\n logout_handler_class = JupyterHubLogoutHandler\n\n cookie_cache_lifetime = Integer(\n config=True,\n default_value=300,\n allow_none=True,\n help=\"\"\"\n Time, in seconds, that we cache a validated cookie before requiring\n revalidation with the hub.\n \"\"\",\n )\n\n def _log_datefmt_default(self):\n \"\"\"Exclude date from default date format\"\"\"\n return \"%Y-%m-%d %H:%M:%S\"\n\n def _log_format_default(self):\n \"\"\"override default log format to include time\"\"\"\n return \"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s %(module)s:%(lineno)d]%(end_color)s %(message)s\"\n\n def _confirm_exit(self):\n # disable the exit confirmation for background notebook processes\n ioloop.IOLoop.instance().stop()\n\n def _clear_cookie_cache(self):\n self.log.debug(\"Clearing cookie cache\")\n self.tornado_settings['cookie_cache'].clear()\n \n def start(self):\n # Start a PeriodicCallback to clear cached cookies. 
This forces us to\n # revalidate our user with the Hub at least every\n # `cookie_cache_lifetime` seconds.\n if self.cookie_cache_lifetime:\n ioloop.PeriodicCallback(\n self._clear_cookie_cache,\n self.cookie_cache_lifetime * 1e3,\n ).start()\n super(SingleUserNotebookApp, self).start()\n \n def init_webapp(self):\n # load the hub related settings into the tornado settings dict\n env = os.environ\n s = self.tornado_settings\n s['cookie_cache'] = {}\n s['user'] = self.user\n s['hub_api_key'] = env.pop('JPY_API_TOKEN')\n s['hub_prefix'] = self.hub_prefix\n s['cookie_name'] = self.cookie_name\n s['login_url'] = self.hub_prefix\n s['hub_api_url'] = self.hub_api_url\n \n super(SingleUserNotebookApp, self).init_webapp()\n self.patch_templates()\n \n def patch_templates(self):\n \"\"\"Patch page templates to add Hub-related buttons\"\"\"\n env = self.web_app.settings['jinja2_env']\n \n env.globals['hub_control_panel_url'] = \\\n url_path_join(self.hub_prefix, 'home')\n \n # patch jinja env loading to modify page template\n def get_page(name):\n if name == 'page.html':\n return page_template\n \n orig_loader = env.loader\n env.loader = ChoiceLoader([\n FunctionLoader(get_page),\n orig_loader,\n ])\n\n\ndef main():\n return SingleUserNotebookApp.launch_instance()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "jupyterhub/singleuser.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\"\"\"Extend regular notebook server to be aware of multiuser things.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport os\ntry:\n from urllib.parse import quote\nexcept ImportError:\n # PY2 Compat\n from urllib import quote\n\nimport requests\nfrom jinja2 import ChoiceLoader, FunctionLoader\n\nfrom tornado import ioloop\nfrom tornado.web import HTTPError\n\nfrom IPython.utils.traitlets import (\n Integer,\n Unicode,\n CUnicode,\n)\n\nfrom IPython.html.notebookapp import NotebookApp, aliases as notebook_aliases\nfrom IPython.html.auth.login import LoginHandler\nfrom IPython.html.auth.logout import LogoutHandler\n\nfrom IPython.html.utils import url_path_join\n\n\nfrom distutils.version import LooseVersion as V\n\nimport IPython\nif V(IPython.__version__) < V('3.0'):\n raise ImportError(\"JupyterHub Requires IPython >= 3.0, found %s\" % IPython.__version__)\n\n# Define two methods to attach to AuthenticatedHandler,\n# which authenticate via the central auth server.\n\nclass JupyterHubLoginHandler(LoginHandler):\n @staticmethod\n def login_available(settings):\n return True\n \n @staticmethod\n def verify_token(self, cookie_name, encrypted_cookie):\n \"\"\"method for token verification\"\"\"\n cookie_cache = self.settings['cookie_cache']\n if encrypted_cookie in cookie_cache:\n # we've seen this token before, don't ask upstream again\n return cookie_cache[encrypted_cookie]\n \n hub_api_url = self.settings['hub_api_url']\n hub_api_key = self.settings['hub_api_key']\n r = requests.get(url_path_join(\n hub_api_url, \"authorizations/cookie\", cookie_name, quote(encrypted_cookie, safe=''),\n ),\n headers = {'Authorization' : 'token %s' % hub_api_key},\n )\n if r.status_code == 404:\n data = None\n elif r.status_code == 403:\n self.log.error(\"I don't have permission to verify cookies, my auth token may have expired: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(500, \"Permission failure checking authorization, I may need to be restarted\")\n elif r.status_code >= 500:\n self.log.error(\"Upstream failure verifying auth token: [%i] 
%s\", r.status_code, r.reason)\n raise HTTPError(502, \"Failed to check authorization (upstream problem)\")\n elif r.status_code >= 400:\n self.log.warn(\"Failed to check authorization: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(500, \"Failed to check authorization\")\n else:\n data = r.json()\n cookie_cache[encrypted_cookie] = data\n return data\n \n @staticmethod\n def get_user(self):\n \"\"\"alternative get_current_user to query the central server\"\"\"\n # only allow this to be called once per handler\n # avoids issues if an error is raised,\n # since this may be called again when trying to render the error page\n if hasattr(self, '_cached_user'):\n return self._cached_user\n \n self._cached_user = None\n my_user = self.settings['user']\n encrypted_cookie = self.get_cookie(self.cookie_name)\n if encrypted_cookie:\n auth_data = JupyterHubLoginHandler.verify_token(self, self.cookie_name, encrypted_cookie)\n if not auth_data:\n # treat invalid token the same as no token\n return None\n user = auth_data['name']\n if user == my_user:\n self._cached_user = user\n return user\n else:\n return None\n else:\n self.log.debug(\"No token cookie\")\n return None\n\n\nclass JupyterHubLogoutHandler(LogoutHandler):\n def get(self):\n self.redirect(url_path_join(self.settings['hub_prefix'], 'logout'))\n\n\n# register new hub related command-line aliases\naliases = dict(notebook_aliases)\naliases.update({\n 'user' : 'SingleUserNotebookApp.user',\n 'cookie-name': 'SingleUserNotebookApp.cookie_name',\n 'hub-prefix': 'SingleUserNotebookApp.hub_prefix',\n 'hub-api-url': 'SingleUserNotebookApp.hub_api_url',\n 'base-url': 'SingleUserNotebookApp.base_url',\n})\n\npage_template = \"\"\"\n{% extends \"templates/page.html\" %}\n\n{% block header_buttons %}\n{{super()}}\n\n<a href='{{hub_control_panel_url}}'\n class='btn btn-default btn-sm navbar-btn pull-right'\n style='margin-right: 4px; margin-left: 2px;'\n>\nControl Panel</a>\n{% endblock %}\n\"\"\"\n\nclass SingleUserNotebookApp(NotebookApp):\n \"\"\"A Subclass of the regular NotebookApp that is aware of the parent multiuser context.\"\"\"\n user = CUnicode(config=True)\n def _user_changed(self, name, old, new):\n self.log.name = new\n cookie_name = Unicode(config=True)\n hub_prefix = Unicode(config=True)\n hub_api_url = Unicode(config=True)\n aliases = aliases\n open_browser = False\n trust_xheaders = True\n login_handler_class = JupyterHubLoginHandler\n logout_handler_class = JupyterHubLogoutHandler\n\n cookie_cache_lifetime = Integer(\n config=True,\n default_value=300,\n allow_none=True,\n help=\"\"\"\n Time, in seconds, that we cache a validated cookie before requiring\n revalidation with the hub.\n \"\"\",\n )\n\n def _log_datefmt_default(self):\n \"\"\"Exclude date from default date format\"\"\"\n return \"%Y-%m-%d %H:%M:%S\"\n\n def _log_format_default(self):\n \"\"\"override default log format to include time\"\"\"\n return \"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s %(module)s:%(lineno)d]%(end_color)s %(message)s\"\n\n def _confirm_exit(self):\n # disable the exit confirmation for background notebook processes\n ioloop.IOLoop.instance().stop()\n\n def _clear_cookie_cache(self):\n self.log.debug(\"Clearing cookie cache\")\n self.tornado_settings['cookie_cache'].clear()\n \n def start(self):\n # Start a PeriodicCallback to clear cached cookies. 
This forces us to\n # revalidate our user with the Hub at least every\n # `cookie_cache_lifetime` seconds.\n if self.cookie_cache_lifetime:\n ioloop.PeriodicCallback(\n self._clear_cookie_cache,\n self.cookie_cache_lifetime * 1e3,\n ).start()\n super(SingleUserNotebookApp, self).start()\n \n def init_webapp(self):\n # load the hub related settings into the tornado settings dict\n env = os.environ\n s = self.tornado_settings\n s['cookie_cache'] = {}\n s['user'] = self.user\n s['hub_api_key'] = env.pop('JPY_API_TOKEN')\n s['hub_prefix'] = self.hub_prefix\n s['cookie_name'] = self.cookie_name\n s['login_url'] = self.hub_prefix\n s['hub_api_url'] = self.hub_api_url\n \n super(SingleUserNotebookApp, self).init_webapp()\n self.patch_templates()\n \n def patch_templates(self):\n \"\"\"Patch page templates to add Hub-related buttons\"\"\"\n env = self.web_app.settings['jinja2_env']\n \n env.globals['hub_control_panel_url'] = \\\n url_path_join(self.hub_prefix, 'home')\n \n # patch jinja env loading to modify page template\n def get_page(name):\n if name == 'page.html':\n return page_template\n \n orig_loader = env.loader\n env.loader = ChoiceLoader([\n FunctionLoader(get_page),\n orig_loader,\n ])\n\n\ndef main():\n return SingleUserNotebookApp.launch_instance()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "jupyterhub/singleuser.py"}]}
| 3,176 | 89 |
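Editorial note on the JupyterHub record above: the traceback is consistent with two trait systems being mixed, since `SingleUserNotebookApp` declared its traits with the standalone `traitlets` package while `NotebookApp` in IPython 3 uses the bundled `IPython.utils.traitlets`; the patch pins the import back to the bundled module. The guarded import below is a sketch only, and the fallback branch is an editorial assumption for later environments rather than part of the patch.

```python
# Hedged sketch: prefer the traitlets bundled with IPython 3 (what the
# patch does), falling back to the standalone package only if it is absent.
try:
    from IPython.utils.traitlets import Integer, Unicode, CUnicode
except ImportError:
    from traitlets import Integer, Unicode, CUnicode
```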
gh_patches_debug_774
|
rasdani/github-patches
|
git_diff
|
getredash__redash-2501
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Non blocking widget refresh indicator
When refreshing a dashboard widget the previous results are hidden by the refresh animation. This can be an issue when refreshing a dashboard frequently, as you might happen to see the spinner for long period of times.
To solve this we can keep showing the old data until new one is available, while showing some indication that refresh is in progress.
Is the following animation enough?

After refreshing a dashboard, widgets become draggable even when not in edit mode
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/handlers/widgets.py`
Content:
```
1 import json
2
3 from flask import request
4 from redash import models
5 from redash.handlers.base import BaseResource
6 from redash.permissions import (require_access,
7 require_object_modify_permission,
8 require_permission, view_only)
9
10
11 class WidgetListResource(BaseResource):
12 @require_permission('edit_dashboard')
13 def post(self):
14 """
15 Add a widget to a dashboard.
16
17 :<json number dashboard_id: The ID for the dashboard being added to
18 :<json visualization_id: The ID of the visualization to put in this widget
19 :<json object options: Widget options
20 :<json string text: Text box contents
21 :<json number width: Width for widget display
22
23 :>json object widget: The created widget
24 """
25 widget_properties = request.get_json(force=True)
26 dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org)
27 require_object_modify_permission(dashboard, self.current_user)
28
29 widget_properties['options'] = json.dumps(widget_properties['options'])
30 widget_properties.pop('id', None)
31 widget_properties['dashboard'] = dashboard
32
33 visualization_id = widget_properties.pop('visualization_id')
34 if visualization_id:
35 visualization = models.Visualization.get_by_id_and_org(visualization_id, self.current_org)
36 require_access(visualization.query_rel.groups, self.current_user, view_only)
37 else:
38 visualization = None
39
40 widget_properties['visualization'] = visualization
41
42 widget = models.Widget(**widget_properties)
43 models.db.session.add(widget)
44 models.db.session.commit()
45
46 models.db.session.commit()
47 return {'widget': widget.to_dict()}
48
49
50 class WidgetResource(BaseResource):
51 @require_permission('edit_dashboard')
52 def post(self, widget_id):
53 """
54 Updates a widget in a dashboard.
55 This method currently handles Text Box widgets only.
56
57 :param number widget_id: The ID of the widget to modify
58
59 :<json string text: The new contents of the text box
60 """
61 widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)
62 require_object_modify_permission(widget.dashboard, self.current_user)
63 widget_properties = request.get_json(force=True)
64 widget.text = widget_properties['text']
65 widget.options = json.dumps(widget_properties['options'])
66 models.db.session.commit()
67 return widget.to_dict()
68
69 @require_permission('edit_dashboard')
70 def delete(self, widget_id):
71 """
72 Remove a widget from a dashboard.
73
74 :param number widget_id: ID of widget to remove
75 """
76 widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)
77 require_object_modify_permission(widget.dashboard, self.current_user)
78 models.db.session.delete(widget)
79 models.db.session.commit()
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py
--- a/redash/handlers/widgets.py
+++ b/redash/handlers/widgets.py
@@ -44,7 +44,7 @@
models.db.session.commit()
models.db.session.commit()
- return {'widget': widget.to_dict()}
+ return widget.to_dict()
class WidgetResource(BaseResource):
|
{"golden_diff": "diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py\n--- a/redash/handlers/widgets.py\n+++ b/redash/handlers/widgets.py\n@@ -44,7 +44,7 @@\n models.db.session.commit()\n \n models.db.session.commit()\n- return {'widget': widget.to_dict()}\n+ return widget.to_dict()\n \n \n class WidgetResource(BaseResource):\n", "issue": "Non blocking widget refresh indicator\nWhen refreshing a dashboard widget the previous results are hidden by the refresh animation. This can be an issue when refreshing a dashboard frequently, as you might happen to see the spinner for long period of times.\r\n\r\nTo solve this we can keep showing the old data until new one is available, while showing some indication that refresh is in progress.\r\n\r\nIs the following animation enough?\r\n\r\n\nAfter refreshing a dashboard, widgets become draggable even when not in edit mode\n\n", "before_files": [{"content": "import json\n\nfrom flask import request\nfrom redash import models\nfrom redash.handlers.base import BaseResource\nfrom redash.permissions import (require_access,\n require_object_modify_permission,\n require_permission, view_only)\n\n\nclass WidgetListResource(BaseResource):\n @require_permission('edit_dashboard')\n def post(self):\n \"\"\"\n Add a widget to a dashboard.\n\n :<json number dashboard_id: The ID for the dashboard being added to\n :<json visualization_id: The ID of the visualization to put in this widget\n :<json object options: Widget options\n :<json string text: Text box contents\n :<json number width: Width for widget display\n\n :>json object widget: The created widget\n \"\"\"\n widget_properties = request.get_json(force=True)\n dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org)\n require_object_modify_permission(dashboard, self.current_user)\n\n widget_properties['options'] = json.dumps(widget_properties['options'])\n widget_properties.pop('id', None)\n widget_properties['dashboard'] = dashboard\n\n visualization_id = widget_properties.pop('visualization_id')\n if visualization_id:\n visualization = models.Visualization.get_by_id_and_org(visualization_id, self.current_org)\n require_access(visualization.query_rel.groups, self.current_user, view_only)\n else:\n visualization = None\n\n widget_properties['visualization'] = visualization\n\n widget = models.Widget(**widget_properties)\n models.db.session.add(widget)\n models.db.session.commit()\n\n models.db.session.commit()\n return {'widget': widget.to_dict()}\n\n\nclass WidgetResource(BaseResource):\n @require_permission('edit_dashboard')\n def post(self, widget_id):\n \"\"\"\n Updates a widget in a dashboard.\n This method currently handles Text Box widgets only.\n\n :param number widget_id: The ID of the widget to modify\n\n :<json string text: The new contents of the text box\n \"\"\"\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n require_object_modify_permission(widget.dashboard, self.current_user)\n widget_properties = request.get_json(force=True)\n widget.text = widget_properties['text']\n widget.options = json.dumps(widget_properties['options'])\n models.db.session.commit()\n return widget.to_dict()\n\n @require_permission('edit_dashboard')\n def delete(self, widget_id):\n \"\"\"\n Remove a widget from a dashboard.\n\n :param number widget_id: ID of widget to remove\n \"\"\"\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n require_object_modify_permission(widget.dashboard, self.current_user)\n 
models.db.session.delete(widget)\n models.db.session.commit()\n", "path": "redash/handlers/widgets.py"}], "after_files": [{"content": "import json\n\nfrom flask import request\nfrom redash import models\nfrom redash.handlers.base import BaseResource\nfrom redash.permissions import (require_access,\n require_object_modify_permission,\n require_permission, view_only)\n\n\nclass WidgetListResource(BaseResource):\n @require_permission('edit_dashboard')\n def post(self):\n \"\"\"\n Add a widget to a dashboard.\n\n :<json number dashboard_id: The ID for the dashboard being added to\n :<json visualization_id: The ID of the visualization to put in this widget\n :<json object options: Widget options\n :<json string text: Text box contents\n :<json number width: Width for widget display\n\n :>json object widget: The created widget\n \"\"\"\n widget_properties = request.get_json(force=True)\n dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org)\n require_object_modify_permission(dashboard, self.current_user)\n\n widget_properties['options'] = json.dumps(widget_properties['options'])\n widget_properties.pop('id', None)\n widget_properties['dashboard'] = dashboard\n\n visualization_id = widget_properties.pop('visualization_id')\n if visualization_id:\n visualization = models.Visualization.get_by_id_and_org(visualization_id, self.current_org)\n require_access(visualization.query_rel.groups, self.current_user, view_only)\n else:\n visualization = None\n\n widget_properties['visualization'] = visualization\n\n widget = models.Widget(**widget_properties)\n models.db.session.add(widget)\n models.db.session.commit()\n\n models.db.session.commit()\n return widget.to_dict()\n\n\nclass WidgetResource(BaseResource):\n @require_permission('edit_dashboard')\n def post(self, widget_id):\n \"\"\"\n Updates a widget in a dashboard.\n This method currently handles Text Box widgets only.\n\n :param number widget_id: The ID of the widget to modify\n\n :<json string text: The new contents of the text box\n \"\"\"\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n require_object_modify_permission(widget.dashboard, self.current_user)\n widget_properties = request.get_json(force=True)\n widget.text = widget_properties['text']\n widget.options = json.dumps(widget_properties['options'])\n models.db.session.commit()\n return widget.to_dict()\n\n @require_permission('edit_dashboard')\n def delete(self, widget_id):\n \"\"\"\n Remove a widget from a dashboard.\n\n :param number widget_id: ID of widget to remove\n \"\"\"\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n require_object_modify_permission(widget.dashboard, self.current_user)\n models.db.session.delete(widget)\n models.db.session.commit()\n", "path": "redash/handlers/widgets.py"}]}
| 1,104 | 89 |
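Editorial note on the Redash record above: the patch changes the widget-creation endpoint to return the created widget dictionary directly instead of nesting it under a `widget` key. The snippet below only illustrates that response-shape change; the field values are invented.

```python
# Hedged illustration of the response-shape change (payloads are made up):
# after the patch, clients read widget fields off the top-level object.
response_before = {"widget": {"id": 1, "width": 1, "text": ""}}
response_after = {"id": 1, "width": 1, "text": ""}

assert response_before["widget"]["id"] == response_after["id"]
```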
gh_patches_debug_845
|
rasdani/github-patches
|
git_diff
|
PrefectHQ__prefect-2056
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AuthorizationError when watching logs from CLI
When running with `prefect run cloud --logs`, after a few minutes I see the following error:
```
prefect.utilities.exceptions.AuthorizationError: [{'message': 'AuthenticationError', 'locations': [], 'path': ['flow_run'], 'extensions': {'code': 'UNAUTHENTICATED'}}]
```
The run itself succeeds but the logs stop at that point, so I guess the token is initially valid but just expires...?
cc @joshmeek @cicdw
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/prefect/cli/run.py`
Content:
```
1 import json
2 import time
3
4 import click
5 from tabulate import tabulate
6
7 from prefect.client import Client
8 from prefect.utilities.graphql import EnumValue, with_args
9
10
11 @click.group(hidden=True)
12 def run():
13 """
14 Run Prefect flows.
15
16 \b
17 Usage:
18 $ prefect run [STORAGE/PLATFORM]
19
20 \b
21 Arguments:
22 cloud Run flows in Prefect Cloud
23
24 \b
25 Examples:
26 $ prefect run cloud --name Test-Flow --project My-Project
27 Flow Run ID: 2ba3rrfd-411c-4d99-bb2a-f64a6dea78f9
28
29 \b
30 $ prefect run cloud --name Test-Flow --project My-Project --watch
31 Flow Run ID: 2ba3rrfd-411c-4d99-bb2a-f64a6dea78f9
32 Scheduled -> Submitted -> Running -> Success
33 """
34
35
36 @run.command(hidden=True)
37 @click.option(
38 "--name", "-n", required=True, help="The name of a flow to run.", hidden=True
39 )
40 @click.option(
41 "--project",
42 "-p",
43 required=True,
44 help="The project that contains the flow.",
45 hidden=True,
46 )
47 @click.option("--version", "-v", type=int, help="A flow version to run.", hidden=True)
48 @click.option(
49 "--parameters-file",
50 "-pf",
51 help="A parameters JSON file.",
52 hidden=True,
53 type=click.Path(exists=True),
54 )
55 @click.option(
56 "--parameters-string", "-ps", help="A parameters JSON string.", hidden=True
57 )
58 @click.option("--run-name", "-rn", help="A name to assign for this run.", hidden=True)
59 @click.option(
60 "--watch",
61 "-w",
62 is_flag=True,
63 help="Watch current state of the flow run.",
64 hidden=True,
65 )
66 @click.option(
67 "--logs", "-l", is_flag=True, help="Live logs of the flow run.", hidden=True
68 )
69 def cloud(
70 name, project, version, parameters_file, parameters_string, run_name, watch, logs
71 ):
72 """
73 Run a registered flow in Prefect Cloud.
74
75 \b
76 Options:
77 --name, -n TEXT The name of a flow to run [required]
78 --project, -p TEXT The name of a project that contains the flow [required]
79 --version, -v INTEGER A flow version to run
80 --parameters-file, -pf FILE PATH A filepath of a JSON file containing parameters
81 --parameters-string, -ps TEXT A string of JSON parameters
82 --run-name, -rn TEXT A name to assign for this run
83 --watch, -w Watch current state of the flow run, stream output to stdout
84 --logs, -l Get logs of the flow run, stream output to stdout
85
86 \b
87 If both `--parameters-file` and `--parameters-string` are provided then the values passed
88 in through the string will override the values provided from the file.
89
90 \b
91 e.g.
92 File contains: {"a": 1, "b": 2}
93 String: '{"a": 3}'
94 Parameters passed to the flow run: {"a": 3, "b": 2}
95 """
96
97 if watch and logs:
98 click.secho(
99 "Streaming state and logs not currently supported together.", fg="red"
100 )
101 return
102
103 query = {
104 "query": {
105 with_args(
106 "flow",
107 {
108 "where": {
109 "_and": {
110 "name": {"_eq": name},
111 "version": {"_eq": version},
112 "project": {"name": {"_eq": project}},
113 }
114 },
115 "order_by": {
116 "name": EnumValue("asc"),
117 "version": EnumValue("desc"),
118 },
119 "distinct_on": EnumValue("name"),
120 },
121 ): {"id": True}
122 }
123 }
124
125 client = Client()
126 result = client.graphql(query)
127
128 flow_data = result.data.flow
129
130 if flow_data:
131 flow_id = flow_data[0].id
132 else:
133 click.secho("{} not found".format(name), fg="red")
134 return
135
136 # Load parameters from file if provided
137 file_params = {}
138 if parameters_file:
139 with open(parameters_file) as params_file:
140 file_params = json.load(params_file)
141
142 # Load parameters from string if provided
143 string_params = {}
144 if parameters_string:
145 string_params = json.loads(parameters_string)
146
147 flow_run_id = client.create_flow_run(
148 flow_id=flow_id, parameters={**file_params, **string_params}, run_name=run_name
149 )
150 click.echo("Flow Run ID: {}".format(flow_run_id))
151
152 if watch:
153 current_states = []
154 while True:
155 query = {
156 "query": {
157 with_args("flow_run_by_pk", {"id": flow_run_id}): {
158 with_args(
159 "states",
160 {"order_by": {EnumValue("timestamp"): EnumValue("asc")}},
161 ): {"state": True, "timestamp": True}
162 }
163 }
164 }
165
166 result = client.graphql(query)
167
168 # Filter through retrieved states and output in order
169 for state_index in result.data.flow_run_by_pk.states:
170 state = state_index.state
171 if state not in current_states:
172 if state != "Success" and state != "Failed":
173 click.echo("{} -> ".format(state), nl=False)
174 else:
175 click.echo(state)
176 return
177
178 current_states.append(state)
179
180 time.sleep(3)
181
182 if logs:
183 all_logs = []
184
185 log_query = {
186 with_args(
187 "logs", {"order_by": {EnumValue("timestamp"): EnumValue("asc")}}
188 ): {"timestamp": True, "message": True, "level": True},
189 "start_time": True,
190 }
191
192 query = {
193 "query": {
194 with_args(
195 "flow_run",
196 {
197 "where": {"id": {"_eq": flow_run_id}},
198 "order_by": {EnumValue("start_time"): EnumValue("desc")},
199 },
200 ): log_query
201 }
202 }
203
204 while True:
205 result = Client().graphql(query)
206
207 flow_run = result.data.flow_run
208 if not flow_run:
209 click.secho("{} not found".format(flow_run_id), fg="red")
210 return
211
212 new_run = flow_run[0]
213 logs = new_run.logs
214 output = []
215
216 for i in logs:
217 if [i.timestamp, i.level, i.message] not in all_logs:
218
219 if not len(all_logs):
220 click.echo(
221 tabulate(
222 [[i.timestamp, i.level, i.message]],
223 headers=["TIMESTAMP", "LEVEL", "MESSAGE"],
224 tablefmt="plain",
225 numalign="left",
226 stralign="left",
227 )
228 )
229 all_logs.append([i.timestamp, i.level, i.message])
230 continue
231
232 output.append([i.timestamp, i.level, i.message])
233 all_logs.append([i.timestamp, i.level, i.message])
234
235 if output:
236 click.echo(
237 tabulate(output, tablefmt="plain", numalign="left", stralign="left")
238 )
239
240 # Check if state is either Success or Failed, exit if it is
241 pk_query = {
242 "query": {
243 with_args("flow_run_by_pk", {"id": flow_run_id}): {"state": True}
244 }
245 }
246 result = client.graphql(pk_query)
247
248 if (
249 result.data.flow_run_by_pk.state == "Success"
250 or result.data.flow_run_by_pk.state == "Failed"
251 ):
252 return
253
254 time.sleep(3)
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/prefect/cli/run.py b/src/prefect/cli/run.py
--- a/src/prefect/cli/run.py
+++ b/src/prefect/cli/run.py
@@ -202,7 +202,7 @@
}
while True:
- result = Client().graphql(query)
+ result = client.graphql(query)
flow_run = result.data.flow_run
if not flow_run:
|
{"golden_diff": "diff --git a/src/prefect/cli/run.py b/src/prefect/cli/run.py\n--- a/src/prefect/cli/run.py\n+++ b/src/prefect/cli/run.py\n@@ -202,7 +202,7 @@\n }\n \n while True:\n- result = Client().graphql(query)\n+ result = client.graphql(query)\n \n flow_run = result.data.flow_run\n if not flow_run:\n", "issue": "AuthorizationError when watching logs from CLI\nWhen running with `prefect run cloud --logs`, after a few minutes I see the following error:\r\n```\r\nprefect.utilities.exceptions.AuthorizationError: [{'message': 'AuthenticationError', 'locations': [], 'path': ['flow_run'], 'extensions': {'code': 'UNAUTHENTICATED'}}]\r\n```\r\nThe run itself succeeds but the logs stop at that point, so I guess the token is initially valid but just expires...?\r\n\r\ncc @joshmeek @cicdw \n", "before_files": [{"content": "import json\nimport time\n\nimport click\nfrom tabulate import tabulate\n\nfrom prefect.client import Client\nfrom prefect.utilities.graphql import EnumValue, with_args\n\n\[email protected](hidden=True)\ndef run():\n \"\"\"\n Run Prefect flows.\n\n \\b\n Usage:\n $ prefect run [STORAGE/PLATFORM]\n\n \\b\n Arguments:\n cloud Run flows in Prefect Cloud\n\n \\b\n Examples:\n $ prefect run cloud --name Test-Flow --project My-Project\n Flow Run ID: 2ba3rrfd-411c-4d99-bb2a-f64a6dea78f9\n\n \\b\n $ prefect run cloud --name Test-Flow --project My-Project --watch\n Flow Run ID: 2ba3rrfd-411c-4d99-bb2a-f64a6dea78f9\n Scheduled -> Submitted -> Running -> Success\n \"\"\"\n\n\[email protected](hidden=True)\[email protected](\n \"--name\", \"-n\", required=True, help=\"The name of a flow to run.\", hidden=True\n)\[email protected](\n \"--project\",\n \"-p\",\n required=True,\n help=\"The project that contains the flow.\",\n hidden=True,\n)\[email protected](\"--version\", \"-v\", type=int, help=\"A flow version to run.\", hidden=True)\[email protected](\n \"--parameters-file\",\n \"-pf\",\n help=\"A parameters JSON file.\",\n hidden=True,\n type=click.Path(exists=True),\n)\[email protected](\n \"--parameters-string\", \"-ps\", help=\"A parameters JSON string.\", hidden=True\n)\[email protected](\"--run-name\", \"-rn\", help=\"A name to assign for this run.\", hidden=True)\[email protected](\n \"--watch\",\n \"-w\",\n is_flag=True,\n help=\"Watch current state of the flow run.\",\n hidden=True,\n)\[email protected](\n \"--logs\", \"-l\", is_flag=True, help=\"Live logs of the flow run.\", hidden=True\n)\ndef cloud(\n name, project, version, parameters_file, parameters_string, run_name, watch, logs\n):\n \"\"\"\n Run a registered flow in Prefect Cloud.\n\n \\b\n Options:\n --name, -n TEXT The name of a flow to run [required]\n --project, -p TEXT The name of a project that contains the flow [required]\n --version, -v INTEGER A flow version to run\n --parameters-file, -pf FILE PATH A filepath of a JSON file containing parameters\n --parameters-string, -ps TEXT A string of JSON parameters\n --run-name, -rn TEXT A name to assign for this run\n --watch, -w Watch current state of the flow run, stream output to stdout\n --logs, -l Get logs of the flow run, stream output to stdout\n\n \\b\n If both `--parameters-file` and `--parameters-string` are provided then the values passed\n in through the string will override the values provided from the file.\n\n \\b\n e.g.\n File contains: {\"a\": 1, \"b\": 2}\n String: '{\"a\": 3}'\n Parameters passed to the flow run: {\"a\": 3, \"b\": 2}\n \"\"\"\n\n if watch and logs:\n click.secho(\n \"Streaming state and logs not currently supported together.\", 
fg=\"red\"\n )\n return\n\n query = {\n \"query\": {\n with_args(\n \"flow\",\n {\n \"where\": {\n \"_and\": {\n \"name\": {\"_eq\": name},\n \"version\": {\"_eq\": version},\n \"project\": {\"name\": {\"_eq\": project}},\n }\n },\n \"order_by\": {\n \"name\": EnumValue(\"asc\"),\n \"version\": EnumValue(\"desc\"),\n },\n \"distinct_on\": EnumValue(\"name\"),\n },\n ): {\"id\": True}\n }\n }\n\n client = Client()\n result = client.graphql(query)\n\n flow_data = result.data.flow\n\n if flow_data:\n flow_id = flow_data[0].id\n else:\n click.secho(\"{} not found\".format(name), fg=\"red\")\n return\n\n # Load parameters from file if provided\n file_params = {}\n if parameters_file:\n with open(parameters_file) as params_file:\n file_params = json.load(params_file)\n\n # Load parameters from string if provided\n string_params = {}\n if parameters_string:\n string_params = json.loads(parameters_string)\n\n flow_run_id = client.create_flow_run(\n flow_id=flow_id, parameters={**file_params, **string_params}, run_name=run_name\n )\n click.echo(\"Flow Run ID: {}\".format(flow_run_id))\n\n if watch:\n current_states = []\n while True:\n query = {\n \"query\": {\n with_args(\"flow_run_by_pk\", {\"id\": flow_run_id}): {\n with_args(\n \"states\",\n {\"order_by\": {EnumValue(\"timestamp\"): EnumValue(\"asc\")}},\n ): {\"state\": True, \"timestamp\": True}\n }\n }\n }\n\n result = client.graphql(query)\n\n # Filter through retrieved states and output in order\n for state_index in result.data.flow_run_by_pk.states:\n state = state_index.state\n if state not in current_states:\n if state != \"Success\" and state != \"Failed\":\n click.echo(\"{} -> \".format(state), nl=False)\n else:\n click.echo(state)\n return\n\n current_states.append(state)\n\n time.sleep(3)\n\n if logs:\n all_logs = []\n\n log_query = {\n with_args(\n \"logs\", {\"order_by\": {EnumValue(\"timestamp\"): EnumValue(\"asc\")}}\n ): {\"timestamp\": True, \"message\": True, \"level\": True},\n \"start_time\": True,\n }\n\n query = {\n \"query\": {\n with_args(\n \"flow_run\",\n {\n \"where\": {\"id\": {\"_eq\": flow_run_id}},\n \"order_by\": {EnumValue(\"start_time\"): EnumValue(\"desc\")},\n },\n ): log_query\n }\n }\n\n while True:\n result = Client().graphql(query)\n\n flow_run = result.data.flow_run\n if not flow_run:\n click.secho(\"{} not found\".format(flow_run_id), fg=\"red\")\n return\n\n new_run = flow_run[0]\n logs = new_run.logs\n output = []\n\n for i in logs:\n if [i.timestamp, i.level, i.message] not in all_logs:\n\n if not len(all_logs):\n click.echo(\n tabulate(\n [[i.timestamp, i.level, i.message]],\n headers=[\"TIMESTAMP\", \"LEVEL\", \"MESSAGE\"],\n tablefmt=\"plain\",\n numalign=\"left\",\n stralign=\"left\",\n )\n )\n all_logs.append([i.timestamp, i.level, i.message])\n continue\n\n output.append([i.timestamp, i.level, i.message])\n all_logs.append([i.timestamp, i.level, i.message])\n\n if output:\n click.echo(\n tabulate(output, tablefmt=\"plain\", numalign=\"left\", stralign=\"left\")\n )\n\n # Check if state is either Success or Failed, exit if it is\n pk_query = {\n \"query\": {\n with_args(\"flow_run_by_pk\", {\"id\": flow_run_id}): {\"state\": True}\n }\n }\n result = client.graphql(pk_query)\n\n if (\n result.data.flow_run_by_pk.state == \"Success\"\n or result.data.flow_run_by_pk.state == \"Failed\"\n ):\n return\n\n time.sleep(3)\n", "path": "src/prefect/cli/run.py"}], "after_files": [{"content": "import json\nimport time\n\nimport click\nfrom tabulate import tabulate\n\nfrom prefect.client import Client\nfrom 
prefect.utilities.graphql import EnumValue, with_args\n\n\[email protected](hidden=True)\ndef run():\n \"\"\"\n Run Prefect flows.\n\n \\b\n Usage:\n $ prefect run [STORAGE/PLATFORM]\n\n \\b\n Arguments:\n cloud Run flows in Prefect Cloud\n\n \\b\n Examples:\n $ prefect run cloud --name Test-Flow --project My-Project\n Flow Run ID: 2ba3rrfd-411c-4d99-bb2a-f64a6dea78f9\n\n \\b\n $ prefect run cloud --name Test-Flow --project My-Project --watch\n Flow Run ID: 2ba3rrfd-411c-4d99-bb2a-f64a6dea78f9\n Scheduled -> Submitted -> Running -> Success\n \"\"\"\n\n\[email protected](hidden=True)\[email protected](\n \"--name\", \"-n\", required=True, help=\"The name of a flow to run.\", hidden=True\n)\[email protected](\n \"--project\",\n \"-p\",\n required=True,\n help=\"The project that contains the flow.\",\n hidden=True,\n)\[email protected](\"--version\", \"-v\", type=int, help=\"A flow version to run.\", hidden=True)\[email protected](\n \"--parameters-file\",\n \"-pf\",\n help=\"A parameters JSON file.\",\n hidden=True,\n type=click.Path(exists=True),\n)\[email protected](\n \"--parameters-string\", \"-ps\", help=\"A parameters JSON string.\", hidden=True\n)\[email protected](\"--run-name\", \"-rn\", help=\"A name to assign for this run.\", hidden=True)\[email protected](\n \"--watch\",\n \"-w\",\n is_flag=True,\n help=\"Watch current state of the flow run.\",\n hidden=True,\n)\[email protected](\n \"--logs\", \"-l\", is_flag=True, help=\"Live logs of the flow run.\", hidden=True\n)\ndef cloud(\n name, project, version, parameters_file, parameters_string, run_name, watch, logs\n):\n \"\"\"\n Run a registered flow in Prefect Cloud.\n\n \\b\n Options:\n --name, -n TEXT The name of a flow to run [required]\n --project, -p TEXT The name of a project that contains the flow [required]\n --version, -v INTEGER A flow version to run\n --parameters-file, -pf FILE PATH A filepath of a JSON file containing parameters\n --parameters-string, -ps TEXT A string of JSON parameters\n --run-name, -rn TEXT A name to assign for this run\n --watch, -w Watch current state of the flow run, stream output to stdout\n --logs, -l Get logs of the flow run, stream output to stdout\n\n \\b\n If both `--parameters-file` and `--parameters-string` are provided then the values passed\n in through the string will override the values provided from the file.\n\n \\b\n e.g.\n File contains: {\"a\": 1, \"b\": 2}\n String: '{\"a\": 3}'\n Parameters passed to the flow run: {\"a\": 3, \"b\": 2}\n \"\"\"\n\n if watch and logs:\n click.secho(\n \"Streaming state and logs not currently supported together.\", fg=\"red\"\n )\n return\n\n query = {\n \"query\": {\n with_args(\n \"flow\",\n {\n \"where\": {\n \"_and\": {\n \"name\": {\"_eq\": name},\n \"version\": {\"_eq\": version},\n \"project\": {\"name\": {\"_eq\": project}},\n }\n },\n \"order_by\": {\n \"name\": EnumValue(\"asc\"),\n \"version\": EnumValue(\"desc\"),\n },\n \"distinct_on\": EnumValue(\"name\"),\n },\n ): {\"id\": True}\n }\n }\n\n client = Client()\n result = client.graphql(query)\n\n flow_data = result.data.flow\n\n if flow_data:\n flow_id = flow_data[0].id\n else:\n click.secho(\"{} not found\".format(name), fg=\"red\")\n return\n\n # Load parameters from file if provided\n file_params = {}\n if parameters_file:\n with open(parameters_file) as params_file:\n file_params = json.load(params_file)\n\n # Load parameters from string if provided\n string_params = {}\n if parameters_string:\n string_params = json.loads(parameters_string)\n\n flow_run_id = 
client.create_flow_run(\n flow_id=flow_id, parameters={**file_params, **string_params}, run_name=run_name\n )\n click.echo(\"Flow Run ID: {}\".format(flow_run_id))\n\n if watch:\n current_states = []\n while True:\n query = {\n \"query\": {\n with_args(\"flow_run_by_pk\", {\"id\": flow_run_id}): {\n with_args(\n \"states\",\n {\"order_by\": {EnumValue(\"timestamp\"): EnumValue(\"asc\")}},\n ): {\"state\": True, \"timestamp\": True}\n }\n }\n }\n\n result = client.graphql(query)\n\n # Filter through retrieved states and output in order\n for state_index in result.data.flow_run_by_pk.states:\n state = state_index.state\n if state not in current_states:\n if state != \"Success\" and state != \"Failed\":\n click.echo(\"{} -> \".format(state), nl=False)\n else:\n click.echo(state)\n return\n\n current_states.append(state)\n\n time.sleep(3)\n\n if logs:\n all_logs = []\n\n log_query = {\n with_args(\n \"logs\", {\"order_by\": {EnumValue(\"timestamp\"): EnumValue(\"asc\")}}\n ): {\"timestamp\": True, \"message\": True, \"level\": True},\n \"start_time\": True,\n }\n\n query = {\n \"query\": {\n with_args(\n \"flow_run\",\n {\n \"where\": {\"id\": {\"_eq\": flow_run_id}},\n \"order_by\": {EnumValue(\"start_time\"): EnumValue(\"desc\")},\n },\n ): log_query\n }\n }\n\n while True:\n result = client.graphql(query)\n\n flow_run = result.data.flow_run\n if not flow_run:\n click.secho(\"{} not found\".format(flow_run_id), fg=\"red\")\n return\n\n new_run = flow_run[0]\n logs = new_run.logs\n output = []\n\n for i in logs:\n if [i.timestamp, i.level, i.message] not in all_logs:\n\n if not len(all_logs):\n click.echo(\n tabulate(\n [[i.timestamp, i.level, i.message]],\n headers=[\"TIMESTAMP\", \"LEVEL\", \"MESSAGE\"],\n tablefmt=\"plain\",\n numalign=\"left\",\n stralign=\"left\",\n )\n )\n all_logs.append([i.timestamp, i.level, i.message])\n continue\n\n output.append([i.timestamp, i.level, i.message])\n all_logs.append([i.timestamp, i.level, i.message])\n\n if output:\n click.echo(\n tabulate(output, tablefmt=\"plain\", numalign=\"left\", stralign=\"left\")\n )\n\n # Check if state is either Success or Failed, exit if it is\n pk_query = {\n \"query\": {\n with_args(\"flow_run_by_pk\", {\"id\": flow_run_id}): {\"state\": True}\n }\n }\n result = client.graphql(pk_query)\n\n if (\n result.data.flow_run_by_pk.state == \"Success\"\n or result.data.flow_run_by_pk.state == \"Failed\"\n ):\n return\n\n time.sleep(3)\n", "path": "src/prefect/cli/run.py"}]}
| 2,743 | 95 |
gh_patches_debug_34407
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-2726
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Liked photos API endpoint
To add an overview of liked photos to ThaliApp, we need a new endpoint for liked photos.
I think it would be best to have `api/v2/photos/photos/` with a `liked` boolean GET filter. It will need to do some filtering to prevent photos that are not published in an album from being returned.
--- END ISSUE ---
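A minimal sketch of the filtering such an endpoint needs, using the `Photo` model and the `likes` / `album__hidden` relations that appear in the views listed below; the helper name itself is only illustrative:

```python
from photos.models import Photo


def liked_photos_for(member):
    """Photos the given member liked, restricted to albums that are published."""
    return Photo.objects.filter(
        likes__member=member,  # only photos this member liked
        album__hidden=False,   # drop photos from hidden (unpublished) albums
    )
```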
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/photos/api/v2/urls.py`
Content:
```
1 """Photos app API v2 urls."""
2 from django.urls import include, path
3
4 from photos.api.v2.views import AlbumDetailView, AlbumListView, PhotoLikeView
5
6 app_name = "photos"
7
8 urlpatterns = [
9 path(
10 "photos/",
11 include(
12 [
13 path("albums/", AlbumListView.as_view(), name="album-list"),
14 path(
15 "albums/<slug:slug>/",
16 AlbumDetailView.as_view(),
17 name="album-detail",
18 ),
19 path(
20 "photos/<int:pk>/like/", PhotoLikeView.as_view(), name="photo-like"
21 ),
22 ]
23 ),
24 ),
25 ]
26
```
Path: `website/photos/api/v2/views.py`
Content:
```
1 from django.db.models import Count, Prefetch, Q
2
3 from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope
4 from rest_framework import filters, status
5 from rest_framework.exceptions import PermissionDenied
6 from rest_framework.generics import ListAPIView, RetrieveAPIView
7 from rest_framework.response import Response
8 from rest_framework.views import APIView
9
10 from photos import services
11 from photos.api.v2.serializers.album import AlbumListSerializer, AlbumSerializer
12 from photos.models import Album, Like, Photo
13
14
15 class AlbumListView(ListAPIView):
16 """Returns an overview of all albums."""
17
18 serializer_class = AlbumListSerializer
19 queryset = Album.objects.filter(hidden=False)
20 permission_classes = [
21 IsAuthenticatedOrTokenHasScope,
22 ]
23 required_scopes = ["photos:read"]
24 filter_backends = (filters.SearchFilter,)
25 search_fields = ("title", "date", "slug")
26
27
28 class AlbumDetailView(RetrieveAPIView):
29 """Returns the details of an album."""
30
31 serializer_class = AlbumSerializer
32 permission_classes = [
33 IsAuthenticatedOrTokenHasScope,
34 ]
35 required_scopes = ["photos:read"]
36 lookup_field = "slug"
37
38 def retrieve(self, request, *args, **kwargs):
39 if not services.is_album_accessible(request, self.get_object()):
40 raise PermissionDenied
41 return super().retrieve(request, *args, **kwargs)
42
43 def get_queryset(self):
44 photos = Photo.objects.select_properties("num_likes")
45 if self.request.member:
46 photos = photos.annotate(
47 member_likes=Count("likes", filter=Q(likes__member=self.request.member))
48 )
49 return Album.objects.filter(hidden=False).prefetch_related(
50 Prefetch("photo_set", queryset=photos)
51 )
52
53
54 class PhotoLikeView(APIView):
55 permission_classes = [IsAuthenticatedOrTokenHasScope]
56 required_scopes = ["photos:read"]
57
58 def get(self, request, **kwargs):
59 photo_id = kwargs.get("pk")
60 try:
61 photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)
62 except Photo.DoesNotExist:
63 return Response(status=status.HTTP_404_NOT_FOUND)
64
65 return Response(
66 {
67 "liked": photo.likes.filter(member=request.member).exists(),
68 "num_likes": photo.num_likes,
69 },
70 status=status.HTTP_200_OK,
71 )
72
73 def post(self, request, **kwargs):
74 photo_id = kwargs.get("pk")
75 try:
76 photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)
77 except Photo.DoesNotExist:
78 return Response(status=status.HTTP_404_NOT_FOUND)
79
80 _, created = Like.objects.get_or_create(photo=photo, member=request.member)
81
82 if created:
83 return Response(
84 {
85 "liked": photo.likes.filter(member=request.member).exists(),
86 "num_likes": photo.num_likes,
87 },
88 status=status.HTTP_201_CREATED,
89 )
90 return Response(
91 {
92 "liked": photo.likes.filter(member=request.member).exists(),
93 "num_likes": photo.num_likes,
94 },
95 status=status.HTTP_200_OK,
96 )
97
98 def delete(self, request, **kwargs):
99 photo_id = kwargs.get("pk")
100 try:
101 photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)
102 except Photo.DoesNotExist:
103 return Response(status=status.HTTP_404_NOT_FOUND)
104
105 try:
106 like = Like.objects.filter(photo__album__hidden=False).get(
107 member=request.member, photo__pk=photo_id
108 )
109 except Like.DoesNotExist:
110 return Response(
111 {
112 "liked": False,
113 "num_likes": photo.num_likes,
114 },
115 status=status.HTTP_204_NO_CONTENT,
116 )
117
118 like.delete()
119
120 return Response(
121 {
122 "liked": False,
123 "num_likes": photo.num_likes,
124 },
125 status=status.HTTP_202_ACCEPTED,
126 )
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/photos/api/v2/urls.py b/website/photos/api/v2/urls.py
--- a/website/photos/api/v2/urls.py
+++ b/website/photos/api/v2/urls.py
@@ -1,7 +1,12 @@
"""Photos app API v2 urls."""
from django.urls import include, path
-from photos.api.v2.views import AlbumDetailView, AlbumListView, PhotoLikeView
+from photos.api.v2.views import (
+ AlbumDetailView,
+ AlbumListView,
+ LikedPhotosListView,
+ PhotoLikeView,
+)
app_name = "photos"
@@ -19,6 +24,9 @@
path(
"photos/<int:pk>/like/", PhotoLikeView.as_view(), name="photo-like"
),
+ path(
+ "photos/liked/", LikedPhotosListView.as_view(), name="liked-photos"
+ ),
]
),
),
diff --git a/website/photos/api/v2/views.py b/website/photos/api/v2/views.py
--- a/website/photos/api/v2/views.py
+++ b/website/photos/api/v2/views.py
@@ -8,7 +8,11 @@
from rest_framework.views import APIView
from photos import services
-from photos.api.v2.serializers.album import AlbumListSerializer, AlbumSerializer
+from photos.api.v2.serializers.album import (
+ AlbumListSerializer,
+ AlbumSerializer,
+ PhotoListSerializer,
+)
from photos.models import Album, Like, Photo
@@ -51,6 +55,35 @@
)
+class LikedPhotosListView(ListAPIView):
+ """Returns the details the liked album."""
+
+ serializer_class = PhotoListSerializer
+ permission_classes = [
+ IsAuthenticatedOrTokenHasScope,
+ ]
+ required_scopes = ["photos:read"]
+
+ def get(self, request, *args, **kwargs):
+ if not self.request.member:
+ return Response(
+ data={
+ "detail": "You need to be a member in order to view your liked photos."
+ },
+ status=status.HTTP_403_FORBIDDEN,
+ )
+ return self.list(request, *args, **kwargs)
+
+ def get_queryset(self):
+ return (
+ Photo.objects.filter(likes__member=self.request.member, album__hidden=False)
+ .annotate(
+ member_likes=Count("likes", filter=Q(likes__member=self.request.member))
+ )
+ .select_properties("num_likes")
+ )
+
+
class PhotoLikeView(APIView):
permission_classes = [IsAuthenticatedOrTokenHasScope]
required_scopes = ["photos:read"]
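A quick way to exercise the new route from a test, sketched with DRF's test client; the `/api/v2/` prefix follows the issue text and `member` stands in for whatever authenticated member fixture the test suite provides:

```python
from rest_framework.test import APIClient


def fetch_liked_photos(member):
    """Call the liked-photos endpoint as the given member (test-only sketch)."""
    client = APIClient()
    client.force_authenticate(user=member)  # DRF shortcut, bypasses real OAuth
    return client.get("/api/v2/photos/photos/liked/")  # path assumes /api/v2/ mount
```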
|
{"golden_diff": "diff --git a/website/photos/api/v2/urls.py b/website/photos/api/v2/urls.py\n--- a/website/photos/api/v2/urls.py\n+++ b/website/photos/api/v2/urls.py\n@@ -1,7 +1,12 @@\n \"\"\"Photos app API v2 urls.\"\"\"\n from django.urls import include, path\n \n-from photos.api.v2.views import AlbumDetailView, AlbumListView, PhotoLikeView\n+from photos.api.v2.views import (\n+ AlbumDetailView,\n+ AlbumListView,\n+ LikedPhotosListView,\n+ PhotoLikeView,\n+)\n \n app_name = \"photos\"\n \n@@ -19,6 +24,9 @@\n path(\n \"photos/<int:pk>/like/\", PhotoLikeView.as_view(), name=\"photo-like\"\n ),\n+ path(\n+ \"photos/liked/\", LikedPhotosListView.as_view(), name=\"liked-photos\"\n+ ),\n ]\n ),\n ),\ndiff --git a/website/photos/api/v2/views.py b/website/photos/api/v2/views.py\n--- a/website/photos/api/v2/views.py\n+++ b/website/photos/api/v2/views.py\n@@ -8,7 +8,11 @@\n from rest_framework.views import APIView\n \n from photos import services\n-from photos.api.v2.serializers.album import AlbumListSerializer, AlbumSerializer\n+from photos.api.v2.serializers.album import (\n+ AlbumListSerializer,\n+ AlbumSerializer,\n+ PhotoListSerializer,\n+)\n from photos.models import Album, Like, Photo\n \n \n@@ -51,6 +55,35 @@\n )\n \n \n+class LikedPhotosListView(ListAPIView):\n+ \"\"\"Returns the details the liked album.\"\"\"\n+\n+ serializer_class = PhotoListSerializer\n+ permission_classes = [\n+ IsAuthenticatedOrTokenHasScope,\n+ ]\n+ required_scopes = [\"photos:read\"]\n+\n+ def get(self, request, *args, **kwargs):\n+ if not self.request.member:\n+ return Response(\n+ data={\n+ \"detail\": \"You need to be a member in order to view your liked photos.\"\n+ },\n+ status=status.HTTP_403_FORBIDDEN,\n+ )\n+ return self.list(request, *args, **kwargs)\n+\n+ def get_queryset(self):\n+ return (\n+ Photo.objects.filter(likes__member=self.request.member, album__hidden=False)\n+ .annotate(\n+ member_likes=Count(\"likes\", filter=Q(likes__member=self.request.member))\n+ )\n+ .select_properties(\"num_likes\")\n+ )\n+\n+\n class PhotoLikeView(APIView):\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"photos:read\"]\n", "issue": "Liked photos API endpoint\nTo add an overview of liked photos to ThaliApp, we need a new endpoint for liked photos.\r\n\r\nI think it would be best to have `api/v2/photos/photos/` with `liked` boolean GET filter. 
It will need to do some filtering to prevent photos that are not published in an album from being returned.\n", "before_files": [{"content": "\"\"\"Photos app API v2 urls.\"\"\"\nfrom django.urls import include, path\n\nfrom photos.api.v2.views import AlbumDetailView, AlbumListView, PhotoLikeView\n\napp_name = \"photos\"\n\nurlpatterns = [\n path(\n \"photos/\",\n include(\n [\n path(\"albums/\", AlbumListView.as_view(), name=\"album-list\"),\n path(\n \"albums/<slug:slug>/\",\n AlbumDetailView.as_view(),\n name=\"album-detail\",\n ),\n path(\n \"photos/<int:pk>/like/\", PhotoLikeView.as_view(), name=\"photo-like\"\n ),\n ]\n ),\n ),\n]\n", "path": "website/photos/api/v2/urls.py"}, {"content": "from django.db.models import Count, Prefetch, Q\n\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import filters, status\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom photos import services\nfrom photos.api.v2.serializers.album import AlbumListSerializer, AlbumSerializer\nfrom photos.models import Album, Like, Photo\n\n\nclass AlbumListView(ListAPIView):\n \"\"\"Returns an overview of all albums.\"\"\"\n\n serializer_class = AlbumListSerializer\n queryset = Album.objects.filter(hidden=False)\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"photos:read\"]\n filter_backends = (filters.SearchFilter,)\n search_fields = (\"title\", \"date\", \"slug\")\n\n\nclass AlbumDetailView(RetrieveAPIView):\n \"\"\"Returns the details of an album.\"\"\"\n\n serializer_class = AlbumSerializer\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"photos:read\"]\n lookup_field = \"slug\"\n\n def retrieve(self, request, *args, **kwargs):\n if not services.is_album_accessible(request, self.get_object()):\n raise PermissionDenied\n return super().retrieve(request, *args, **kwargs)\n\n def get_queryset(self):\n photos = Photo.objects.select_properties(\"num_likes\")\n if self.request.member:\n photos = photos.annotate(\n member_likes=Count(\"likes\", filter=Q(likes__member=self.request.member))\n )\n return Album.objects.filter(hidden=False).prefetch_related(\n Prefetch(\"photo_set\", queryset=photos)\n )\n\n\nclass PhotoLikeView(APIView):\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"photos:read\"]\n\n def get(self, request, **kwargs):\n photo_id = kwargs.get(\"pk\")\n try:\n photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)\n except Photo.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n return Response(\n {\n \"liked\": photo.likes.filter(member=request.member).exists(),\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_200_OK,\n )\n\n def post(self, request, **kwargs):\n photo_id = kwargs.get(\"pk\")\n try:\n photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)\n except Photo.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n _, created = Like.objects.get_or_create(photo=photo, member=request.member)\n\n if created:\n return Response(\n {\n \"liked\": photo.likes.filter(member=request.member).exists(),\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_201_CREATED,\n )\n return Response(\n {\n \"liked\": photo.likes.filter(member=request.member).exists(),\n \"num_likes\": photo.num_likes,\n },\n 
status=status.HTTP_200_OK,\n )\n\n def delete(self, request, **kwargs):\n photo_id = kwargs.get(\"pk\")\n try:\n photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)\n except Photo.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n try:\n like = Like.objects.filter(photo__album__hidden=False).get(\n member=request.member, photo__pk=photo_id\n )\n except Like.DoesNotExist:\n return Response(\n {\n \"liked\": False,\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_204_NO_CONTENT,\n )\n\n like.delete()\n\n return Response(\n {\n \"liked\": False,\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_202_ACCEPTED,\n )\n", "path": "website/photos/api/v2/views.py"}], "after_files": [{"content": "\"\"\"Photos app API v2 urls.\"\"\"\nfrom django.urls import include, path\n\nfrom photos.api.v2.views import (\n AlbumDetailView,\n AlbumListView,\n LikedPhotosListView,\n PhotoLikeView,\n)\n\napp_name = \"photos\"\n\nurlpatterns = [\n path(\n \"photos/\",\n include(\n [\n path(\"albums/\", AlbumListView.as_view(), name=\"album-list\"),\n path(\n \"albums/<slug:slug>/\",\n AlbumDetailView.as_view(),\n name=\"album-detail\",\n ),\n path(\n \"photos/<int:pk>/like/\", PhotoLikeView.as_view(), name=\"photo-like\"\n ),\n path(\n \"photos/liked/\", LikedPhotosListView.as_view(), name=\"liked-photos\"\n ),\n ]\n ),\n ),\n]\n", "path": "website/photos/api/v2/urls.py"}, {"content": "from django.db.models import Count, Prefetch, Q\n\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import filters, status\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom photos import services\nfrom photos.api.v2.serializers.album import (\n AlbumListSerializer,\n AlbumSerializer,\n PhotoListSerializer,\n)\nfrom photos.models import Album, Like, Photo\n\n\nclass AlbumListView(ListAPIView):\n \"\"\"Returns an overview of all albums.\"\"\"\n\n serializer_class = AlbumListSerializer\n queryset = Album.objects.filter(hidden=False)\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"photos:read\"]\n filter_backends = (filters.SearchFilter,)\n search_fields = (\"title\", \"date\", \"slug\")\n\n\nclass AlbumDetailView(RetrieveAPIView):\n \"\"\"Returns the details of an album.\"\"\"\n\n serializer_class = AlbumSerializer\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"photos:read\"]\n lookup_field = \"slug\"\n\n def retrieve(self, request, *args, **kwargs):\n if not services.is_album_accessible(request, self.get_object()):\n raise PermissionDenied\n return super().retrieve(request, *args, **kwargs)\n\n def get_queryset(self):\n photos = Photo.objects.select_properties(\"num_likes\")\n if self.request.member:\n photos = photos.annotate(\n member_likes=Count(\"likes\", filter=Q(likes__member=self.request.member))\n )\n return Album.objects.filter(hidden=False).prefetch_related(\n Prefetch(\"photo_set\", queryset=photos)\n )\n\n\nclass LikedPhotosListView(ListAPIView):\n \"\"\"Returns the details the liked album.\"\"\"\n\n serializer_class = PhotoListSerializer\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"photos:read\"]\n\n def get(self, request, *args, **kwargs):\n if not self.request.member:\n return Response(\n data={\n \"detail\": \"You need to be a member in 
order to view your liked photos.\"\n },\n status=status.HTTP_403_FORBIDDEN,\n )\n return self.list(request, *args, **kwargs)\n\n def get_queryset(self):\n return (\n Photo.objects.filter(likes__member=self.request.member, album__hidden=False)\n .annotate(\n member_likes=Count(\"likes\", filter=Q(likes__member=self.request.member))\n )\n .select_properties(\"num_likes\")\n )\n\n\nclass PhotoLikeView(APIView):\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"photos:read\"]\n\n def get(self, request, **kwargs):\n photo_id = kwargs.get(\"pk\")\n try:\n photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)\n except Photo.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n return Response(\n {\n \"liked\": photo.likes.filter(member=request.member).exists(),\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_200_OK,\n )\n\n def post(self, request, **kwargs):\n photo_id = kwargs.get(\"pk\")\n try:\n photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)\n except Photo.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n _, created = Like.objects.get_or_create(photo=photo, member=request.member)\n\n if created:\n return Response(\n {\n \"liked\": photo.likes.filter(member=request.member).exists(),\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_201_CREATED,\n )\n return Response(\n {\n \"liked\": photo.likes.filter(member=request.member).exists(),\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_200_OK,\n )\n\n def delete(self, request, **kwargs):\n photo_id = kwargs.get(\"pk\")\n try:\n photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)\n except Photo.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n try:\n like = Like.objects.filter(photo__album__hidden=False).get(\n member=request.member, photo__pk=photo_id\n )\n except Like.DoesNotExist:\n return Response(\n {\n \"liked\": False,\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_204_NO_CONTENT,\n )\n\n like.delete()\n\n return Response(\n {\n \"liked\": False,\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_202_ACCEPTED,\n )\n", "path": "website/photos/api/v2/views.py"}]}
| 1,634 | 588 |
gh_patches_debug_2593
|
rasdani/github-patches
|
git_diff
|
secdev__scapy-3167
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Outdated Automotive Documentation
Reminder for myself.
Outdated:
https://github.com/secdev/scapy/blob/1aa0d8a849f7b102d18a3f65986e272aec5f518a/doc/scapy/layers/automotive.rst#L75-L85
SOME/IP:
https://github.com/secdev/scapy/blob/1aa0d8a849f7b102d18a3f65986e272aec5f518a/doc/scapy/layers/automotive.rst#L1011-L1030
Mentioned by @WebLabInt via Gitter:
```Hi, I'm having a problem creating a basic SOME/IP service discovery following the example provided at https://scapy.readthedocs.io/en/latest/layers/automotive.html?highlight=some%20ip#creating-a-some-ip-sd-message. The SOME/IP package is working perfectly; however, the SD packet is not formed correctly and is therefore not recognized as an SD packet by Wireshark, and the SOME/IP version is not correct. I did a capture with Wireshark reporting those issues: http://fuiing.com/share/SD%20prob.png . It would be great if you could support me on this issue. Thank you for making Scapy open source, it's really a great tool, have a great day ```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/scapy/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Scapy documentation build configuration file, created by
4 # sphinx-quickstart on Wed Mar 07 19:02:35 2018.
5 #
6 # This file is execfile()d with the current directory set to its
7 # containing dir.
8 #
9 # Note that not all possible configuration values are present in this
10 # autogenerated file.
11 #
12 # All configuration values have a default; values that are commented out
13 # serve to show the default.
14
15 import datetime
16
17 # If extensions (or modules to document with autodoc) are in another directory,
18 # add these directories to sys.path here. If the directory is relative to the
19 # documentation root, use os.path.abspath to make it absolute, like shown here.
20 #
21 import os
22 import sys
23 sys.path.insert(0, os.path.abspath('../../'))
24 sys.path.append(os.path.abspath('_ext'))
25
26
27 # -- General configuration ------------------------------------------------
28
29 # If your documentation needs a minimal Sphinx version, state it here.
30 #
31 needs_sphinx = '3.0.0'
32
33 # Add any Sphinx extension module names here, as strings. They can be
34 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
35 # ones.
36 extensions = [
37 'sphinx.ext.autodoc',
38 'sphinx.ext.napoleon',
39 'sphinx.ext.todo',
40 'sphinx.ext.linkcode',
41 'scapy_doc'
42 ]
43
44 # Autodoc configuration
45 autodoc_inherit_docstrings = False
46 autodoc_default_options = {
47 'undoc-members': True
48 }
49
50 # Enable the todo module
51 todo_include_todos = True
52
53 # Linkcode resolver
54 from linkcode_res import linkcode_resolve
55
56 # Add any paths that contain templates here, relative to this directory.
57 templates_path = ['_templates']
58
59 # The suffix(es) of source filenames.
60 # You can specify multiple suffix as a list of string:
61 #
62 # source_suffix = ['.rst', '.md']
63 source_suffix = '.rst'
64
65 # The master toctree document.
66 master_doc = 'index'
67
68 # General information about the project.
69 project = 'Scapy'
70 year = datetime.datetime.now().year
71 copyright = '2008-%s Philippe Biondi and the Scapy community' % year
72
73 # The version info for the project you're documenting, acts as replacement for
74 # |version| and |release|, also used in various other places throughout the
75 # built documents.
76 from scapy import VERSION, VERSION_MAIN
77 # The short X.Y version.
78 release = VERSION_MAIN
79 # The full version, including alpha/beta/rc tags.
80 version = VERSION
81
82 # The language for content autogenerated by Sphinx. Refer to documentation
83 # for a list of supported languages.
84 #
85 # This is also used if you do content translation via gettext catalogs.
86 # Usually you set "language" from the command line for these cases.
87 language = None
88
89 # List of patterns, relative to source directory, that match files and
90 # directories to ignore when looking for source files.
91 # This patterns also effect to html_static_path and html_extra_path
92 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
93
94 # The name of the Pygments (syntax highlighting) style to use.
95 pygments_style = 'sphinx'
96
97 # If true, `todo` and `todoList` produce output, else they produce nothing.
98 todo_include_todos = False
99
100
101 # -- Options for HTML output ----------------------------------------------
102
103 # The theme to use for HTML and HTML Help pages. See the documentation for
104 # a list of builtin themes.
105 #
106 html_theme = 'sphinx_rtd_theme'
107
108 # Theme options are theme-specific and customize the look and feel of a theme
109 # further. For a list of options available for each theme, see the
110 # documentation.
111 #
112 # html_theme_options = {}
113
114 # Add any paths that contain custom static files (such as style sheets) here,
115 # relative to this directory. They are copied after the builtin static files,
116 # so a file named "default.css" will overwrite the builtin "default.css".
117 html_static_path = ['_static']
118
119 # Custom sidebar templates, must be a dictionary that maps document names
120 # to template names.
121 #
122 # This is required for the alabaster theme
123 # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
124 html_sidebars = {
125 '**': [
126 'relations.html', # needs 'show_related': True theme option to display
127 'searchbox.html',
128 ]
129 }
130
131 # Make :manpage directive work on HTML output.
132 manpages_url = 'https://manpages.debian.org/{path}'
133
134 # -- Options for HTMLHelp output ------------------------------------------
135
136 # Output file base name for HTML help builder.
137 htmlhelp_basename = 'Scapydoc'
138
139
140 # -- Options for LaTeX output ---------------------------------------------
141
142 latex_elements = {
143 # The paper size ('letterpaper' or 'a4paper').
144 #
145 'papersize': 'a4paper',
146
147 # The font size ('10pt', '11pt' or '12pt').
148 #
149 'pointsize': '11pt',
150
151 # Additional stuff for the LaTeX preamble.
152 #
153 # 'preamble': '',
154
155 # Latex figure (float) alignment
156 #
157 # 'figure_align': 'htbp',
158 }
159
160 # Grouping the document tree into LaTeX files. List of tuples
161 # (source start file, target name, title,
162 # author, documentclass [howto, manual, or own class]).
163 latex_documents = [
164 ('index', 'Scapy.tex', 'Scapy Documentation',
165 'Philippe Biondi and the Scapy community', 'manual'),
166 ]
167
168
169 # -- Options for manual page output ---------------------------------------
170
171 # One entry per manual page. List of tuples
172 # (source start file, name, description, authors, manual section).
173 man_pages = [
174 (master_doc, 'scapy', 'Scapy Documentation',
175 ['Philippe Biondi and the Scapy community'], 1)
176 ]
177
178
179 # -- Options for Texinfo output -------------------------------------------
180
181 # Grouping the document tree into Texinfo files. List of tuples
182 # (source start file, target name, title, author,
183 # dir menu entry, description, category)
184 texinfo_documents = [
185 (master_doc, 'Scapy', 'Scapy Documentation',
186 'Philippe Biondi and the Scapy community', 'Scapy',
187 '',
188 'Miscellaneous'),
189 ]
190
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/doc/scapy/conf.py b/doc/scapy/conf.py
--- a/doc/scapy/conf.py
+++ b/doc/scapy/conf.py
@@ -97,6 +97,9 @@
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
+# Enable codeauthor and sectionauthor directives
+show_authors = True
+
# -- Options for HTML output ----------------------------------------------
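The patch only flips a Sphinx switch, so for context: with `show_authors` enabled, the standard `sectionauthor`/`codeauthor` directives found in the `.rst` sources are rendered into the built docs instead of being silently dropped. A sketch of the setting as it lands in `doc/scapy/conf.py` (the author name is a placeholder):

```python
# doc/scapy/conf.py
show_authors = True
# With this enabled, reST directives such as
#   .. sectionauthor:: Jane Doe
#   .. codeauthor:: Jane Doe
# produce visible author notes in the generated documentation.
```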
|
{"golden_diff": "diff --git a/doc/scapy/conf.py b/doc/scapy/conf.py\n--- a/doc/scapy/conf.py\n+++ b/doc/scapy/conf.py\n@@ -97,6 +97,9 @@\n # If true, `todo` and `todoList` produce output, else they produce nothing.\n todo_include_todos = False\n \n+# Enable codeauthor and sectionauthor directives\n+show_authors = True\n+\n \n # -- Options for HTML output ----------------------------------------------\n", "issue": "Outdated Automotive Documentation\nReminder for myself.\r\n\r\nOutdated:\r\nhttps://github.com/secdev/scapy/blob/1aa0d8a849f7b102d18a3f65986e272aec5f518a/doc/scapy/layers/automotive.rst#L75-L85\r\n\r\nSOME/IP:\r\nhttps://github.com/secdev/scapy/blob/1aa0d8a849f7b102d18a3f65986e272aec5f518a/doc/scapy/layers/automotive.rst#L1011-L1030\r\nMentioned by @WebLabInt via gitter:\r\n```Hi, I m having a problem creating a basic SOME IP service discovery following the example provided https://scapy.readthedocs.io/en/latest/layers/automotive.html?highlight=some%20ip#creating-a-some-ip-sd-message. The SOME IP package is working perfectly, however, the SD packet is not formed correctly thus not recognized as a SD packet by Wireshark and the SOME IP version is not correct. I did a capture with Wireshark reporting those issues http://fuiing.com/share/SD%20prob.png . I will be great if you can support me on this issue, thank you for making Scapy open source, it's really a great tool, have a great day ```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Scapy documentation build configuration file, created by\n# sphinx-quickstart on Wed Mar 07 19:02:35 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport datetime\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('../../'))\nsys.path.append(os.path.abspath('_ext'))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\nneeds_sphinx = '3.0.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.todo',\n 'sphinx.ext.linkcode',\n 'scapy_doc'\n]\n\n# Autodoc configuration\nautodoc_inherit_docstrings = False\nautodoc_default_options = {\n 'undoc-members': True\n}\n\n# Enable the todo module\ntodo_include_todos = True\n\n# Linkcode resolver\nfrom linkcode_res import linkcode_resolve\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Scapy'\nyear = datetime.datetime.now().year\ncopyright = '2008-%s Philippe Biondi and the Scapy community' % year\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\nfrom scapy import VERSION, VERSION_MAIN\n# The short X.Y version.\nrelease = VERSION_MAIN\n# The full version, including alpha/beta/rc tags.\nversion = VERSION\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n '**': [\n 'relations.html', # needs 'show_related': True theme option to display\n 'searchbox.html',\n ]\n}\n\n# Make :manpage directive work on HTML output.\nmanpages_url = 'https://manpages.debian.org/{path}'\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Scapydoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n 'papersize': 'a4paper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n 'pointsize': '11pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'Scapy.tex', 'Scapy Documentation',\n 'Philippe Biondi and the Scapy community', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'scapy', 'Scapy Documentation',\n ['Philippe Biondi and the Scapy community'], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Scapy', 'Scapy Documentation',\n 'Philippe Biondi and the Scapy community', 'Scapy',\n '',\n 'Miscellaneous'),\n]\n", "path": "doc/scapy/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Scapy documentation build configuration file, created by\n# sphinx-quickstart on Wed Mar 07 19:02:35 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport datetime\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('../../'))\nsys.path.append(os.path.abspath('_ext'))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\nneeds_sphinx = '3.0.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.todo',\n 'sphinx.ext.linkcode',\n 'scapy_doc'\n]\n\n# Autodoc configuration\nautodoc_inherit_docstrings = False\nautodoc_default_options = {\n 'undoc-members': True\n}\n\n# Enable the todo module\ntodo_include_todos = True\n\n# Linkcode resolver\nfrom linkcode_res import linkcode_resolve\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Scapy'\nyear = datetime.datetime.now().year\ncopyright = '2008-%s Philippe Biondi and the Scapy community' % year\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\nfrom scapy import VERSION, VERSION_MAIN\n# The short X.Y version.\nrelease = VERSION_MAIN\n# The full version, including alpha/beta/rc tags.\nversion = VERSION\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n# Enable codeauthor and sectionauthor directives\nshow_authors = True\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n '**': [\n 'relations.html', # needs 'show_related': True theme option to display\n 'searchbox.html',\n ]\n}\n\n# Make :manpage directive work on HTML output.\nmanpages_url = 'https://manpages.debian.org/{path}'\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Scapydoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n 'papersize': 'a4paper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n 'pointsize': '11pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'Scapy.tex', 'Scapy Documentation',\n 'Philippe Biondi and the Scapy community', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'scapy', 'Scapy Documentation',\n ['Philippe Biondi and the Scapy community'], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Scapy', 'Scapy Documentation',\n 'Philippe Biondi and the Scapy community', 'Scapy',\n '',\n 'Miscellaneous'),\n]\n", "path": "doc/scapy/conf.py"}]}
| 2,420 | 97 |
gh_patches_debug_665
|
rasdani/github-patches
|
git_diff
|
scikit-image__scikit-image-1741
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
peak_local_max Incorrect output type
This [function](http://scikit-image.org/docs/dev/api/skimage.feature.html#peak-local-max) is returning a `list` instead of an `ndarray` if no peaks are detected.
I traced the problem to this [line](https://github.com/scikit-image/scikit-image/blob/master/skimage/feature/peak.py#L122). However, I still have to check whether there are other cases (beyond this line) that produce an incorrect output.
I will work on it this weekend and submit a pull request or a code snippet here.
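A minimal reproduction of the type inconsistency might look like the sketch below (assumes NumPy and an unpatched scikit-image checkout; the array sizes are arbitrary):
```python
import numpy as np
from skimage.feature import peak_local_max

bumpy = np.zeros((10, 10))
bumpy[5, 5] = 1.0                 # one clear peak in the interior
flat = np.zeros((10, 10))         # constant image, so no peaks at all

print(type(peak_local_max(bumpy, min_distance=1)))  # numpy.ndarray, as documented
print(type(peak_local_max(flat, min_distance=1)))   # plain list on the unpatched version
```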
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/feature/peak.py`
Content:
```
1 import numpy as np
2 import scipy.ndimage as ndi
3 from ..filters import rank_order
4
5
6 def peak_local_max(image, min_distance=10, threshold_abs=0, threshold_rel=0.1,
7 exclude_border=True, indices=True, num_peaks=np.inf,
8 footprint=None, labels=None):
9 """
10 Find peaks in an image, and return them as coordinates or a boolean array.
11
12 Peaks are the local maxima in a region of `2 * min_distance + 1`
13 (i.e. peaks are separated by at least `min_distance`).
14
15 NOTE: If peaks are flat (i.e. multiple adjacent pixels have identical
16 intensities), the coordinates of all such pixels are returned.
17
18 Parameters
19 ----------
20 image : ndarray of floats
21 Input image.
22 min_distance : int
23 Minimum number of pixels separating peaks in a region of `2 *
24 min_distance + 1` (i.e. peaks are separated by at least
25 `min_distance`). If `exclude_border` is True, this value also excludes
26 a border `min_distance` from the image boundary.
27 To find the maximum number of peaks, use `min_distance=1`.
28 threshold_abs : float
29 Minimum intensity of peaks.
30 threshold_rel : float
31 Minimum intensity of peaks calculated as `max(image) * threshold_rel`.
32 exclude_border : bool
33 If True, `min_distance` excludes peaks from the border of the image as
34 well as from each other.
35 indices : bool
36 If True, the output will be an array representing peak coordinates.
37 If False, the output will be a boolean array shaped as `image.shape`
38 with peaks present at True elements.
39 num_peaks : int
40 Maximum number of peaks. When the number of peaks exceeds `num_peaks`,
41 return `num_peaks` peaks based on highest peak intensity.
42 footprint : ndarray of bools, optional
43 If provided, `footprint == 1` represents the local region within which
44 to search for peaks at every point in `image`. Overrides
45 `min_distance`, except for border exclusion if `exclude_border=True`.
46 labels : ndarray of ints, optional
47 If provided, each unique region `labels == value` represents a unique
48 region to search for peaks. Zero is reserved for background.
49
50 Returns
51 -------
52 output : ndarray or ndarray of bools
53
54 * If `indices = True` : (row, column, ...) coordinates of peaks.
55 * If `indices = False` : Boolean array shaped like `image`, with peaks
56 represented by True values.
57
58 Notes
59 -----
60 The peak local maximum function returns the coordinates of local peaks
61 (maxima) in a image. A maximum filter is used for finding local maxima.
62 This operation dilates the original image. After comparison between
63 dilated and original image, peak_local_max function returns the
64 coordinates of peaks where dilated image = original.
65
66 Examples
67 --------
68 >>> img1 = np.zeros((7, 7))
69 >>> img1[3, 4] = 1
70 >>> img1[3, 2] = 1.5
71 >>> img1
72 array([[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
73 [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
74 [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
75 [ 0. , 0. , 1.5, 0. , 1. , 0. , 0. ],
76 [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
77 [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
78 [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ]])
79
80 >>> peak_local_max(img1, min_distance=1)
81 array([[3, 2],
82 [3, 4]])
83
84 >>> peak_local_max(img1, min_distance=2)
85 array([[3, 2]])
86
87 >>> img2 = np.zeros((20, 20, 20))
88 >>> img2[10, 10, 10] = 1
89 >>> peak_local_max(img2, exclude_border=False)
90 array([[10, 10, 10]])
91
92 """
93 out = np.zeros_like(image, dtype=np.bool)
94 # In the case of labels, recursively build and return an output
95 # operating on each label separately
96 if labels is not None:
97 label_values = np.unique(labels)
98 # Reorder label values to have consecutive integers (no gaps)
99 if np.any(np.diff(label_values) != 1):
100 mask = labels >= 1
101 labels[mask] = 1 + rank_order(labels[mask])[0].astype(labels.dtype)
102 labels = labels.astype(np.int32)
103
104 # New values for new ordering
105 label_values = np.unique(labels)
106 for label in label_values[label_values != 0]:
107 maskim = (labels == label)
108 out += peak_local_max(image * maskim, min_distance=min_distance,
109 threshold_abs=threshold_abs,
110 threshold_rel=threshold_rel,
111 exclude_border=exclude_border,
112 indices=False, num_peaks=np.inf,
113 footprint=footprint, labels=None)
114
115 if indices is True:
116 return np.transpose(out.nonzero())
117 else:
118 return out.astype(np.bool)
119
120 if np.all(image == image.flat[0]):
121 if indices is True:
122 return []
123 else:
124 return out
125
126 image = image.copy()
127 # Non maximum filter
128 if footprint is not None:
129 image_max = ndi.maximum_filter(image, footprint=footprint,
130 mode='constant')
131 else:
132 size = 2 * min_distance + 1
133 image_max = ndi.maximum_filter(image, size=size, mode='constant')
134 mask = (image == image_max)
135 image *= mask
136
137 if exclude_border:
138 # zero out the image borders
139 for i in range(image.ndim):
140 image = image.swapaxes(0, i)
141 image[:min_distance] = 0
142 image[-min_distance:] = 0
143 image = image.swapaxes(0, i)
144
145 # find top peak candidates above a threshold
146 peak_threshold = max(np.max(image.ravel()) * threshold_rel, threshold_abs)
147
148 # get coordinates of peaks
149 coordinates = np.argwhere(image > peak_threshold)
150
151 if coordinates.shape[0] > num_peaks:
152 intensities = image.flat[np.ravel_multi_index(coordinates.transpose(),image.shape)]
153 idx_maxsort = np.argsort(intensities)[::-1]
154 coordinates = coordinates[idx_maxsort][:num_peaks]
155
156 if indices is True:
157 return coordinates
158 else:
159 nd_indices = tuple(coordinates.T)
160 out[nd_indices] = True
161 return out
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/skimage/feature/peak.py b/skimage/feature/peak.py
--- a/skimage/feature/peak.py
+++ b/skimage/feature/peak.py
@@ -119,7 +119,7 @@
if np.all(image == image.flat[0]):
if indices is True:
- return []
+ return np.empty((0, 2), np.int)
else:
return out
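A quick manual check of the patched behaviour could be the following sketch (assumes the patch above is applied; the hard-coded `(0, 2)` shape corresponds to 2-D input images):
```python
import numpy as np
from skimage.feature import peak_local_max

result = peak_local_max(np.zeros((10, 10)), min_distance=1)
assert isinstance(result, np.ndarray)   # no longer a plain list
assert result.shape == (0, 2)           # empty (row, col) coordinate array
```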
|
{"golden_diff": "diff --git a/skimage/feature/peak.py b/skimage/feature/peak.py\n--- a/skimage/feature/peak.py\n+++ b/skimage/feature/peak.py\n@@ -119,7 +119,7 @@\n \n if np.all(image == image.flat[0]):\n if indices is True:\n- return []\n+ return np.empty((0, 2), np.int)\n else:\n return out\n", "issue": "peak_local_max Incorrect output type \nThis [function](http://scikit-image.org/docs/dev/api/skimage.feature.html#peak-local-max) is returning a `list` instead of an `ndarray` if no peaks are detected.\n\nI traced the problem till this [line](https://github.com/scikit-image/scikit-image/blob/master/skimage/feature/peak.py#L122). However, I have to check if there is other case (beyond this line) that produces an incorrect output.\n\nI will work on it this weekend and submit a pull-request or a code snippet here\n\n", "before_files": [{"content": "import numpy as np\nimport scipy.ndimage as ndi\nfrom ..filters import rank_order\n\n\ndef peak_local_max(image, min_distance=10, threshold_abs=0, threshold_rel=0.1,\n exclude_border=True, indices=True, num_peaks=np.inf,\n footprint=None, labels=None):\n \"\"\"\n Find peaks in an image, and return them as coordinates or a boolean array.\n\n Peaks are the local maxima in a region of `2 * min_distance + 1`\n (i.e. peaks are separated by at least `min_distance`).\n\n NOTE: If peaks are flat (i.e. multiple adjacent pixels have identical\n intensities), the coordinates of all such pixels are returned.\n\n Parameters\n ----------\n image : ndarray of floats\n Input image.\n min_distance : int\n Minimum number of pixels separating peaks in a region of `2 *\n min_distance + 1` (i.e. peaks are separated by at least\n `min_distance`). If `exclude_border` is True, this value also excludes\n a border `min_distance` from the image boundary.\n To find the maximum number of peaks, use `min_distance=1`.\n threshold_abs : float\n Minimum intensity of peaks.\n threshold_rel : float\n Minimum intensity of peaks calculated as `max(image) * threshold_rel`.\n exclude_border : bool\n If True, `min_distance` excludes peaks from the border of the image as\n well as from each other.\n indices : bool\n If True, the output will be an array representing peak coordinates.\n If False, the output will be a boolean array shaped as `image.shape`\n with peaks present at True elements.\n num_peaks : int\n Maximum number of peaks. When the number of peaks exceeds `num_peaks`,\n return `num_peaks` peaks based on highest peak intensity.\n footprint : ndarray of bools, optional\n If provided, `footprint == 1` represents the local region within which\n to search for peaks at every point in `image`. Overrides\n `min_distance`, except for border exclusion if `exclude_border=True`.\n labels : ndarray of ints, optional\n If provided, each unique region `labels == value` represents a unique\n region to search for peaks. Zero is reserved for background.\n\n Returns\n -------\n output : ndarray or ndarray of bools\n\n * If `indices = True` : (row, column, ...) coordinates of peaks.\n * If `indices = False` : Boolean array shaped like `image`, with peaks\n represented by True values.\n\n Notes\n -----\n The peak local maximum function returns the coordinates of local peaks\n (maxima) in a image. A maximum filter is used for finding local maxima.\n This operation dilates the original image. 
After comparison between\n dilated and original image, peak_local_max function returns the\n coordinates of peaks where dilated image = original.\n\n Examples\n --------\n >>> img1 = np.zeros((7, 7))\n >>> img1[3, 4] = 1\n >>> img1[3, 2] = 1.5\n >>> img1\n array([[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 1.5, 0. , 1. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ]])\n\n >>> peak_local_max(img1, min_distance=1)\n array([[3, 2],\n [3, 4]])\n\n >>> peak_local_max(img1, min_distance=2)\n array([[3, 2]])\n\n >>> img2 = np.zeros((20, 20, 20))\n >>> img2[10, 10, 10] = 1\n >>> peak_local_max(img2, exclude_border=False)\n array([[10, 10, 10]])\n\n \"\"\"\n out = np.zeros_like(image, dtype=np.bool)\n # In the case of labels, recursively build and return an output\n # operating on each label separately\n if labels is not None:\n label_values = np.unique(labels)\n # Reorder label values to have consecutive integers (no gaps)\n if np.any(np.diff(label_values) != 1):\n mask = labels >= 1\n labels[mask] = 1 + rank_order(labels[mask])[0].astype(labels.dtype)\n labels = labels.astype(np.int32)\n\n # New values for new ordering\n label_values = np.unique(labels)\n for label in label_values[label_values != 0]:\n maskim = (labels == label)\n out += peak_local_max(image * maskim, min_distance=min_distance,\n threshold_abs=threshold_abs,\n threshold_rel=threshold_rel,\n exclude_border=exclude_border,\n indices=False, num_peaks=np.inf,\n footprint=footprint, labels=None)\n\n if indices is True:\n return np.transpose(out.nonzero())\n else:\n return out.astype(np.bool)\n\n if np.all(image == image.flat[0]):\n if indices is True:\n return []\n else:\n return out\n\n image = image.copy()\n # Non maximum filter\n if footprint is not None:\n image_max = ndi.maximum_filter(image, footprint=footprint,\n mode='constant')\n else:\n size = 2 * min_distance + 1\n image_max = ndi.maximum_filter(image, size=size, mode='constant')\n mask = (image == image_max)\n image *= mask\n\n if exclude_border:\n # zero out the image borders\n for i in range(image.ndim):\n image = image.swapaxes(0, i)\n image[:min_distance] = 0\n image[-min_distance:] = 0\n image = image.swapaxes(0, i)\n\n # find top peak candidates above a threshold\n peak_threshold = max(np.max(image.ravel()) * threshold_rel, threshold_abs)\n\n # get coordinates of peaks\n coordinates = np.argwhere(image > peak_threshold)\n\n if coordinates.shape[0] > num_peaks:\n intensities = image.flat[np.ravel_multi_index(coordinates.transpose(),image.shape)]\n idx_maxsort = np.argsort(intensities)[::-1]\n coordinates = coordinates[idx_maxsort][:num_peaks]\n\n if indices is True:\n return coordinates\n else:\n nd_indices = tuple(coordinates.T)\n out[nd_indices] = True\n return out\n", "path": "skimage/feature/peak.py"}], "after_files": [{"content": "import numpy as np\nimport scipy.ndimage as ndi\nfrom ..filters import rank_order\n\n\ndef peak_local_max(image, min_distance=10, threshold_abs=0, threshold_rel=0.1,\n exclude_border=True, indices=True, num_peaks=np.inf,\n footprint=None, labels=None):\n \"\"\"\n Find peaks in an image, and return them as coordinates or a boolean array.\n\n Peaks are the local maxima in a region of `2 * min_distance + 1`\n (i.e. peaks are separated by at least `min_distance`).\n\n NOTE: If peaks are flat (i.e. 
multiple adjacent pixels have identical\n intensities), the coordinates of all such pixels are returned.\n\n Parameters\n ----------\n image : ndarray of floats\n Input image.\n min_distance : int\n Minimum number of pixels separating peaks in a region of `2 *\n min_distance + 1` (i.e. peaks are separated by at least\n `min_distance`). If `exclude_border` is True, this value also excludes\n a border `min_distance` from the image boundary.\n To find the maximum number of peaks, use `min_distance=1`.\n threshold_abs : float\n Minimum intensity of peaks.\n threshold_rel : float\n Minimum intensity of peaks calculated as `max(image) * threshold_rel`.\n exclude_border : bool\n If True, `min_distance` excludes peaks from the border of the image as\n well as from each other.\n indices : bool\n If True, the output will be an array representing peak coordinates.\n If False, the output will be a boolean array shaped as `image.shape`\n with peaks present at True elements.\n num_peaks : int\n Maximum number of peaks. When the number of peaks exceeds `num_peaks`,\n return `num_peaks` peaks based on highest peak intensity.\n footprint : ndarray of bools, optional\n If provided, `footprint == 1` represents the local region within which\n to search for peaks at every point in `image`. Overrides\n `min_distance`, except for border exclusion if `exclude_border=True`.\n labels : ndarray of ints, optional\n If provided, each unique region `labels == value` represents a unique\n region to search for peaks. Zero is reserved for background.\n\n Returns\n -------\n output : ndarray or ndarray of bools\n\n * If `indices = True` : (row, column, ...) coordinates of peaks.\n * If `indices = False` : Boolean array shaped like `image`, with peaks\n represented by True values.\n\n Notes\n -----\n The peak local maximum function returns the coordinates of local peaks\n (maxima) in a image. A maximum filter is used for finding local maxima.\n This operation dilates the original image. After comparison between\n dilated and original image, peak_local_max function returns the\n coordinates of peaks where dilated image = original.\n\n Examples\n --------\n >>> img1 = np.zeros((7, 7))\n >>> img1[3, 4] = 1\n >>> img1[3, 2] = 1.5\n >>> img1\n array([[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 1.5, 0. , 1. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. , 0. , 0. 
]])\n\n >>> peak_local_max(img1, min_distance=1)\n array([[3, 2],\n [3, 4]])\n\n >>> peak_local_max(img1, min_distance=2)\n array([[3, 2]])\n\n >>> img2 = np.zeros((20, 20, 20))\n >>> img2[10, 10, 10] = 1\n >>> peak_local_max(img2, exclude_border=False)\n array([[10, 10, 10]])\n\n \"\"\"\n out = np.zeros_like(image, dtype=np.bool)\n # In the case of labels, recursively build and return an output\n # operating on each label separately\n if labels is not None:\n label_values = np.unique(labels)\n # Reorder label values to have consecutive integers (no gaps)\n if np.any(np.diff(label_values) != 1):\n mask = labels >= 1\n labels[mask] = 1 + rank_order(labels[mask])[0].astype(labels.dtype)\n labels = labels.astype(np.int32)\n\n # New values for new ordering\n label_values = np.unique(labels)\n for label in label_values[label_values != 0]:\n maskim = (labels == label)\n out += peak_local_max(image * maskim, min_distance=min_distance,\n threshold_abs=threshold_abs,\n threshold_rel=threshold_rel,\n exclude_border=exclude_border,\n indices=False, num_peaks=np.inf,\n footprint=footprint, labels=None)\n\n if indices is True:\n return np.transpose(out.nonzero())\n else:\n return out.astype(np.bool)\n\n if np.all(image == image.flat[0]):\n if indices is True:\n return np.empty((0, 2), np.int)\n else:\n return out\n\n image = image.copy()\n # Non maximum filter\n if footprint is not None:\n image_max = ndi.maximum_filter(image, footprint=footprint,\n mode='constant')\n else:\n size = 2 * min_distance + 1\n image_max = ndi.maximum_filter(image, size=size, mode='constant')\n mask = (image == image_max)\n image *= mask\n\n if exclude_border:\n # zero out the image borders\n for i in range(image.ndim):\n image = image.swapaxes(0, i)\n image[:min_distance] = 0\n image[-min_distance:] = 0\n image = image.swapaxes(0, i)\n\n # find top peak candidates above a threshold\n peak_threshold = max(np.max(image.ravel()) * threshold_rel, threshold_abs)\n\n # get coordinates of peaks\n coordinates = np.argwhere(image > peak_threshold)\n\n if coordinates.shape[0] > num_peaks:\n intensities = image.flat[np.ravel_multi_index(coordinates.transpose(),image.shape)]\n idx_maxsort = np.argsort(intensities)[::-1]\n coordinates = coordinates[idx_maxsort][:num_peaks]\n\n if indices is True:\n return coordinates\n else:\n nd_indices = tuple(coordinates.T)\n out[nd_indices] = True\n return out\n", "path": "skimage/feature/peak.py"}]}
| 2,377 | 101 |
gh_patches_debug_38309
|
rasdani/github-patches
|
git_diff
|
tornadoweb__tornado-2562
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update release notes and set version to 5.0b1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # Ensure we get the local copy of tornado instead of what's on the standard path
2 import os
3 import sys
4 import time
5 sys.path.insert(0, os.path.abspath(".."))
6 import tornado
7
8 master_doc = "index"
9
10 project = "Tornado"
11 copyright = "2009-%s, The Tornado Authors" % time.strftime("%Y")
12
13 version = release = tornado.version
14
15 extensions = [
16 "sphinx.ext.autodoc",
17 "sphinx.ext.coverage",
18 "sphinx.ext.doctest",
19 "sphinx.ext.intersphinx",
20 "sphinx.ext.viewcode",
21 ]
22
23 primary_domain = 'py'
24 default_role = 'py:obj'
25
26 autodoc_member_order = "bysource"
27 autoclass_content = "both"
28 autodoc_inherit_docstrings = False
29
30 # Without this line sphinx includes a copy of object.__init__'s docstring
31 # on any class that doesn't define __init__.
32 # https://bitbucket.org/birkenfeld/sphinx/issue/1337/autoclass_content-both-uses-object__init__
33 autodoc_docstring_signature = False
34
35 coverage_skip_undoc_in_source = True
36 coverage_ignore_modules = [
37 "tornado.platform.asyncio",
38 "tornado.platform.caresresolver",
39 "tornado.platform.twisted",
40 ]
41 # I wish this could go in a per-module file...
42 coverage_ignore_classes = [
43 # tornado.gen
44 "Runner",
45
46 # tornado.web
47 "ChunkedTransferEncoding",
48 "GZipContentEncoding",
49 "OutputTransform",
50 "TemplateModule",
51 "url",
52
53 # tornado.websocket
54 "WebSocketProtocol",
55 "WebSocketProtocol13",
56 "WebSocketProtocol76",
57 ]
58
59 coverage_ignore_functions = [
60 # various modules
61 "doctests",
62 "main",
63
64 # tornado.escape
65 # parse_qs_bytes should probably be documented but it's complicated by
66 # having different implementations between py2 and py3.
67 "parse_qs_bytes",
68
69 # tornado.gen
70 "Multi",
71 ]
72
73 html_favicon = 'favicon.ico'
74
75 latex_documents = [
76 ('index', 'tornado.tex', 'Tornado Documentation', 'The Tornado Authors', 'manual', False),
77 ]
78
79 intersphinx_mapping = {
80 'python': ('https://docs.python.org/3.6/', None),
81 }
82
83 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
84
85 # On RTD we can't import sphinx_rtd_theme, but it will be applied by
86 # default anyway. This block will use the same theme when building locally
87 # as on RTD.
88 if not on_rtd:
89 import sphinx_rtd_theme
90 html_theme = 'sphinx_rtd_theme'
91 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,14 +1,14 @@
# Ensure we get the local copy of tornado instead of what's on the standard path
import os
import sys
-import time
+
sys.path.insert(0, os.path.abspath(".."))
import tornado
master_doc = "index"
project = "Tornado"
-copyright = "2009-%s, The Tornado Authors" % time.strftime("%Y")
+copyright = "The Tornado Authors"
version = release = tornado.version
@@ -20,8 +20,8 @@
"sphinx.ext.viewcode",
]
-primary_domain = 'py'
-default_role = 'py:obj'
+primary_domain = "py"
+default_role = "py:obj"
autodoc_member_order = "bysource"
autoclass_content = "both"
@@ -42,14 +42,12 @@
coverage_ignore_classes = [
# tornado.gen
"Runner",
-
# tornado.web
"ChunkedTransferEncoding",
"GZipContentEncoding",
"OutputTransform",
"TemplateModule",
"url",
-
# tornado.websocket
"WebSocketProtocol",
"WebSocketProtocol13",
@@ -60,32 +58,36 @@
# various modules
"doctests",
"main",
-
# tornado.escape
# parse_qs_bytes should probably be documented but it's complicated by
# having different implementations between py2 and py3.
"parse_qs_bytes",
-
# tornado.gen
"Multi",
]
-html_favicon = 'favicon.ico'
+html_favicon = "favicon.ico"
latex_documents = [
- ('index', 'tornado.tex', 'Tornado Documentation', 'The Tornado Authors', 'manual', False),
+ (
+ "index",
+ "tornado.tex",
+ "Tornado Documentation",
+ "The Tornado Authors",
+ "manual",
+ False,
+ )
]
-intersphinx_mapping = {
- 'python': ('https://docs.python.org/3.6/', None),
-}
+intersphinx_mapping = {"python": ("https://docs.python.org/3.6/", None)}
-on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
+on_rtd = os.environ.get("READTHEDOCS", None) == "True"
# On RTD we can't import sphinx_rtd_theme, but it will be applied by
# default anyway. This block will use the same theme when building locally
# as on RTD.
if not on_rtd:
import sphinx_rtd_theme
- html_theme = 'sphinx_rtd_theme'
+
+ html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -1,14 +1,14 @@\n # Ensure we get the local copy of tornado instead of what's on the standard path\n import os\n import sys\n-import time\n+\n sys.path.insert(0, os.path.abspath(\"..\"))\n import tornado\n \n master_doc = \"index\"\n \n project = \"Tornado\"\n-copyright = \"2009-%s, The Tornado Authors\" % time.strftime(\"%Y\")\n+copyright = \"The Tornado Authors\"\n \n version = release = tornado.version\n \n@@ -20,8 +20,8 @@\n \"sphinx.ext.viewcode\",\n ]\n \n-primary_domain = 'py'\n-default_role = 'py:obj'\n+primary_domain = \"py\"\n+default_role = \"py:obj\"\n \n autodoc_member_order = \"bysource\"\n autoclass_content = \"both\"\n@@ -42,14 +42,12 @@\n coverage_ignore_classes = [\n # tornado.gen\n \"Runner\",\n-\n # tornado.web\n \"ChunkedTransferEncoding\",\n \"GZipContentEncoding\",\n \"OutputTransform\",\n \"TemplateModule\",\n \"url\",\n-\n # tornado.websocket\n \"WebSocketProtocol\",\n \"WebSocketProtocol13\",\n@@ -60,32 +58,36 @@\n # various modules\n \"doctests\",\n \"main\",\n-\n # tornado.escape\n # parse_qs_bytes should probably be documented but it's complicated by\n # having different implementations between py2 and py3.\n \"parse_qs_bytes\",\n-\n # tornado.gen\n \"Multi\",\n ]\n \n-html_favicon = 'favicon.ico'\n+html_favicon = \"favicon.ico\"\n \n latex_documents = [\n- ('index', 'tornado.tex', 'Tornado Documentation', 'The Tornado Authors', 'manual', False),\n+ (\n+ \"index\",\n+ \"tornado.tex\",\n+ \"Tornado Documentation\",\n+ \"The Tornado Authors\",\n+ \"manual\",\n+ False,\n+ )\n ]\n \n-intersphinx_mapping = {\n- 'python': ('https://docs.python.org/3.6/', None),\n-}\n+intersphinx_mapping = {\"python\": (\"https://docs.python.org/3.6/\", None)}\n \n-on_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n+on_rtd = os.environ.get(\"READTHEDOCS\", None) == \"True\"\n \n # On RTD we can't import sphinx_rtd_theme, but it will be applied by\n # default anyway. 
This block will use the same theme when building locally\n # as on RTD.\n if not on_rtd:\n import sphinx_rtd_theme\n- html_theme = 'sphinx_rtd_theme'\n+\n+ html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n", "issue": "Update release notes and set version to 5.0b1\n\n", "before_files": [{"content": "# Ensure we get the local copy of tornado instead of what's on the standard path\nimport os\nimport sys\nimport time\nsys.path.insert(0, os.path.abspath(\"..\"))\nimport tornado\n\nmaster_doc = \"index\"\n\nproject = \"Tornado\"\ncopyright = \"2009-%s, The Tornado Authors\" % time.strftime(\"%Y\")\n\nversion = release = tornado.version\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.viewcode\",\n]\n\nprimary_domain = 'py'\ndefault_role = 'py:obj'\n\nautodoc_member_order = \"bysource\"\nautoclass_content = \"both\"\nautodoc_inherit_docstrings = False\n\n# Without this line sphinx includes a copy of object.__init__'s docstring\n# on any class that doesn't define __init__.\n# https://bitbucket.org/birkenfeld/sphinx/issue/1337/autoclass_content-both-uses-object__init__\nautodoc_docstring_signature = False\n\ncoverage_skip_undoc_in_source = True\ncoverage_ignore_modules = [\n \"tornado.platform.asyncio\",\n \"tornado.platform.caresresolver\",\n \"tornado.platform.twisted\",\n]\n# I wish this could go in a per-module file...\ncoverage_ignore_classes = [\n # tornado.gen\n \"Runner\",\n\n # tornado.web\n \"ChunkedTransferEncoding\",\n \"GZipContentEncoding\",\n \"OutputTransform\",\n \"TemplateModule\",\n \"url\",\n\n # tornado.websocket\n \"WebSocketProtocol\",\n \"WebSocketProtocol13\",\n \"WebSocketProtocol76\",\n]\n\ncoverage_ignore_functions = [\n # various modules\n \"doctests\",\n \"main\",\n\n # tornado.escape\n # parse_qs_bytes should probably be documented but it's complicated by\n # having different implementations between py2 and py3.\n \"parse_qs_bytes\",\n\n # tornado.gen\n \"Multi\",\n]\n\nhtml_favicon = 'favicon.ico'\n\nlatex_documents = [\n ('index', 'tornado.tex', 'Tornado Documentation', 'The Tornado Authors', 'manual', False),\n]\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3.6/', None),\n}\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# On RTD we can't import sphinx_rtd_theme, but it will be applied by\n# default anyway. 
This block will use the same theme when building locally\n# as on RTD.\nif not on_rtd:\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n", "path": "docs/conf.py"}], "after_files": [{"content": "# Ensure we get the local copy of tornado instead of what's on the standard path\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"..\"))\nimport tornado\n\nmaster_doc = \"index\"\n\nproject = \"Tornado\"\ncopyright = \"The Tornado Authors\"\n\nversion = release = tornado.version\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.viewcode\",\n]\n\nprimary_domain = \"py\"\ndefault_role = \"py:obj\"\n\nautodoc_member_order = \"bysource\"\nautoclass_content = \"both\"\nautodoc_inherit_docstrings = False\n\n# Without this line sphinx includes a copy of object.__init__'s docstring\n# on any class that doesn't define __init__.\n# https://bitbucket.org/birkenfeld/sphinx/issue/1337/autoclass_content-both-uses-object__init__\nautodoc_docstring_signature = False\n\ncoverage_skip_undoc_in_source = True\ncoverage_ignore_modules = [\n \"tornado.platform.asyncio\",\n \"tornado.platform.caresresolver\",\n \"tornado.platform.twisted\",\n]\n# I wish this could go in a per-module file...\ncoverage_ignore_classes = [\n # tornado.gen\n \"Runner\",\n # tornado.web\n \"ChunkedTransferEncoding\",\n \"GZipContentEncoding\",\n \"OutputTransform\",\n \"TemplateModule\",\n \"url\",\n # tornado.websocket\n \"WebSocketProtocol\",\n \"WebSocketProtocol13\",\n \"WebSocketProtocol76\",\n]\n\ncoverage_ignore_functions = [\n # various modules\n \"doctests\",\n \"main\",\n # tornado.escape\n # parse_qs_bytes should probably be documented but it's complicated by\n # having different implementations between py2 and py3.\n \"parse_qs_bytes\",\n # tornado.gen\n \"Multi\",\n]\n\nhtml_favicon = \"favicon.ico\"\n\nlatex_documents = [\n (\n \"index\",\n \"tornado.tex\",\n \"Tornado Documentation\",\n \"The Tornado Authors\",\n \"manual\",\n False,\n )\n]\n\nintersphinx_mapping = {\"python\": (\"https://docs.python.org/3.6/\", None)}\n\non_rtd = os.environ.get(\"READTHEDOCS\", None) == \"True\"\n\n# On RTD we can't import sphinx_rtd_theme, but it will be applied by\n# default anyway. This block will use the same theme when building locally\n# as on RTD.\nif not on_rtd:\n import sphinx_rtd_theme\n\n html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n", "path": "docs/conf.py"}]}
| 1,063 | 641 |
gh_patches_debug_15664
|
rasdani/github-patches
|
git_diff
|
getredash__redash-909
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error on adding modules to python datasource
I'm trying to add a module to a Python data source, but it's failing with this traceback:
```
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1475, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1461, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py", line 477, in wrapper
resp = resource(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/flask_login.py", line 792, in decorated_view
return func(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/flask/views.py", line 84, in view
return self.dispatch_request(*args, **kwargs)
File "/opt/redash/redash.0.9.2.b1536/redash/handlers/base.py", line 19, in dispatch_request
return super(BaseResource, self).dispatch_request(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py", line 587, in dispatch_request
resp = meth(*args, **kwargs)
File "/opt/redash/redash.0.9.2.b1536/redash/permissions.py", line 40, in decorated
return fn(*args, **kwargs)
File "/opt/redash/redash.0.9.2.b1536/redash/handlers/data_sources.py", line 38, in post
data_source.options.update(req['options'])
File "/opt/redash/redash.0.9.2.b1536/redash/utils/configuration.py", line 56, in update
if k in self.schema['secret'] and v == SECRET_PLACEHOLDER:
KeyError: 'secret'
```
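The failing check is easy to reproduce in isolation whenever the data source schema defines no `secret` list (a standalone sketch, not taken from the Redash code base):
```python
SECRET_PLACEHOLDER = '--------'
schema = {'type': 'object', 'properties': {'modules': {'type': 'string'}}}  # no 'secret' key
k, v = 'modules', 'pandas'

# What the current code evaluates -- raises KeyError because 'secret' is absent:
#   k in schema['secret'] and v == SECRET_PLACEHOLDER
# A tolerant lookup avoids the crash:
print(k in schema.get('secret', []) and v == SECRET_PLACEHOLDER)   # False, no exception
```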
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/utils/configuration.py`
Content:
```
1 import json
2 import jsonschema
3 from jsonschema import ValidationError
4
5 SECRET_PLACEHOLDER = '--------'
6
7
8 class ConfigurationContainer(object):
9 def __init__(self, config, schema=None):
10 self._config = config
11 self.set_schema(schema)
12
13 def set_schema(self, schema):
14 self._schema = schema
15
16 @property
17 def schema(self):
18 if self._schema is None:
19 raise RuntimeError("Schema missing.")
20
21 return self._schema
22
23 def is_valid(self):
24 try:
25 self.validate()
26 except (ValidationError, ValueError):
27 return False
28
29 return True
30
31 def validate(self):
32 jsonschema.validate(self._config, self._schema)
33
34 def to_json(self):
35 return json.dumps(self._config)
36
37 def iteritems(self):
38 return self._config.iteritems()
39
40 def to_dict(self, mask_secrets=False):
41 if (mask_secrets is False or 'secret' not in self.schema):
42 return self._config
43
44 config = self._config.copy()
45 for key in config:
46 if key in self.schema['secret']:
47 config[key] = SECRET_PLACEHOLDER
48
49 return config
50
51 def update(self, new_config):
52 jsonschema.validate(new_config, self.schema)
53
54 config = {}
55 for k, v in new_config.iteritems():
56 if k in self.schema['secret'] and v == SECRET_PLACEHOLDER:
57 config[k] = self[k]
58 else:
59 config[k] = v
60
61 self._config = config
62
63 def get(self, *args, **kwargs):
64 return self._config.get(*args, **kwargs)
65
66 def __getitem__(self, item):
67 if item in self._config:
68 return self._config[item]
69
70 raise KeyError(item)
71
72 def __contains__(self, item):
73 return item in self._config
74
75 @classmethod
76 def from_json(cls, config_in_json):
77 return cls(json.loads(config_in_json))
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/redash/utils/configuration.py b/redash/utils/configuration.py
--- a/redash/utils/configuration.py
+++ b/redash/utils/configuration.py
@@ -38,7 +38,7 @@
return self._config.iteritems()
def to_dict(self, mask_secrets=False):
- if (mask_secrets is False or 'secret' not in self.schema):
+ if mask_secrets is False or 'secret' not in self.schema:
return self._config
config = self._config.copy()
@@ -53,7 +53,7 @@
config = {}
for k, v in new_config.iteritems():
- if k in self.schema['secret'] and v == SECRET_PLACEHOLDER:
+ if k in self.schema.get('secret', []) and v == SECRET_PLACEHOLDER:
config[k] = self[k]
else:
config[k] = v
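With the patch applied, `update()` no longer assumes a `secret` section in the schema. A sketch of the now-working call (the schema and options here are invented for illustration):
```python
from redash.utils.configuration import ConfigurationContainer

schema = {'type': 'object', 'properties': {'modules': {'type': 'string'}}}  # no 'secret' list
conf = ConfigurationContainer({'modules': ''}, schema=schema)
conf.update({'modules': 'pandas'})   # previously raised KeyError: 'secret'
print(conf.to_dict())                # {'modules': 'pandas'}
```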
|
{"golden_diff": "diff --git a/redash/utils/configuration.py b/redash/utils/configuration.py\n--- a/redash/utils/configuration.py\n+++ b/redash/utils/configuration.py\n@@ -38,7 +38,7 @@\n return self._config.iteritems()\n \n def to_dict(self, mask_secrets=False):\n- if (mask_secrets is False or 'secret' not in self.schema):\n+ if mask_secrets is False or 'secret' not in self.schema:\n return self._config\n \n config = self._config.copy()\n@@ -53,7 +53,7 @@\n \n config = {}\n for k, v in new_config.iteritems():\n- if k in self.schema['secret'] and v == SECRET_PLACEHOLDER:\n+ if k in self.schema.get('secret', []) and v == SECRET_PLACEHOLDER:\n config[k] = self[k]\n else:\n config[k] = v\n", "issue": "Error on adding modules to python datasource\nI'm trying to add a module to a python datasource, but it's failing with this traceback\n\n```\nTraceback (most recent call last):\n File \"/usr/local/lib/python2.7/dist-packages/flask/app.py\", line 1475, in full_dispatch_request\n rv = self.dispatch_request()\n File \"/usr/local/lib/python2.7/dist-packages/flask/app.py\", line 1461, in dispatch_request\n return self.view_functions[rule.endpoint](**req.view_args)\n File \"/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py\", line 477, in wrapper\n resp = resource(*args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/flask_login.py\", line 792, in decorated_view\n return func(*args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/flask/views.py\", line 84, in view\n return self.dispatch_request(*args, **kwargs)\n File \"/opt/redash/redash.0.9.2.b1536/redash/handlers/base.py\", line 19, in dispatch_request\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py\", line 587, in dispatch_request\n resp = meth(*args, **kwargs)\n File \"/opt/redash/redash.0.9.2.b1536/redash/permissions.py\", line 40, in decorated\n return fn(*args, **kwargs)\n File \"/opt/redash/redash.0.9.2.b1536/redash/handlers/data_sources.py\", line 38, in post\n data_source.options.update(req['options'])\n File \"/opt/redash/redash.0.9.2.b1536/redash/utils/configuration.py\", line 56, in update\n if k in self.schema['secret'] and v == SECRET_PLACEHOLDER:\nKeyError: 'secret'\n```\n\n", "before_files": [{"content": "import json\nimport jsonschema\nfrom jsonschema import ValidationError\n\nSECRET_PLACEHOLDER = '--------'\n\n\nclass ConfigurationContainer(object):\n def __init__(self, config, schema=None):\n self._config = config\n self.set_schema(schema)\n\n def set_schema(self, schema):\n self._schema = schema\n\n @property\n def schema(self):\n if self._schema is None:\n raise RuntimeError(\"Schema missing.\")\n\n return self._schema\n\n def is_valid(self):\n try:\n self.validate()\n except (ValidationError, ValueError):\n return False\n\n return True\n\n def validate(self):\n jsonschema.validate(self._config, self._schema)\n\n def to_json(self):\n return json.dumps(self._config)\n\n def iteritems(self):\n return self._config.iteritems()\n\n def to_dict(self, mask_secrets=False):\n if (mask_secrets is False or 'secret' not in self.schema):\n return self._config\n\n config = self._config.copy()\n for key in config:\n if key in self.schema['secret']:\n config[key] = SECRET_PLACEHOLDER\n\n return config\n\n def update(self, new_config):\n jsonschema.validate(new_config, self.schema)\n\n config = {}\n for k, v in new_config.iteritems():\n if k in self.schema['secret'] and v == SECRET_PLACEHOLDER:\n config[k] = self[k]\n 
else:\n config[k] = v\n\n self._config = config\n\n def get(self, *args, **kwargs):\n return self._config.get(*args, **kwargs)\n\n def __getitem__(self, item):\n if item in self._config:\n return self._config[item]\n\n raise KeyError(item)\n\n def __contains__(self, item):\n return item in self._config\n\n @classmethod\n def from_json(cls, config_in_json):\n return cls(json.loads(config_in_json))\n", "path": "redash/utils/configuration.py"}], "after_files": [{"content": "import json\nimport jsonschema\nfrom jsonschema import ValidationError\n\nSECRET_PLACEHOLDER = '--------'\n\n\nclass ConfigurationContainer(object):\n def __init__(self, config, schema=None):\n self._config = config\n self.set_schema(schema)\n\n def set_schema(self, schema):\n self._schema = schema\n\n @property\n def schema(self):\n if self._schema is None:\n raise RuntimeError(\"Schema missing.\")\n\n return self._schema\n\n def is_valid(self):\n try:\n self.validate()\n except (ValidationError, ValueError):\n return False\n\n return True\n\n def validate(self):\n jsonschema.validate(self._config, self._schema)\n\n def to_json(self):\n return json.dumps(self._config)\n\n def iteritems(self):\n return self._config.iteritems()\n\n def to_dict(self, mask_secrets=False):\n if mask_secrets is False or 'secret' not in self.schema:\n return self._config\n\n config = self._config.copy()\n for key in config:\n if key in self.schema['secret']:\n config[key] = SECRET_PLACEHOLDER\n\n return config\n\n def update(self, new_config):\n jsonschema.validate(new_config, self.schema)\n\n config = {}\n for k, v in new_config.iteritems():\n if k in self.schema.get('secret', []) and v == SECRET_PLACEHOLDER:\n config[k] = self[k]\n else:\n config[k] = v\n\n self._config = config\n\n def get(self, *args, **kwargs):\n return self._config.get(*args, **kwargs)\n\n def __getitem__(self, item):\n if item in self._config:\n return self._config[item]\n\n raise KeyError(item)\n\n def __contains__(self, item):\n return item in self._config\n\n @classmethod\n def from_json(cls, config_in_json):\n return cls(json.loads(config_in_json))\n", "path": "redash/utils/configuration.py"}]}
| 1,295 | 193 |
gh_patches_debug_4079
|
rasdani/github-patches
|
git_diff
|
internetarchive__openlibrary-9112
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Edition Editing: language autocomplete is slow
### Problem
According to [this](https://internetarchive.slack.com/archives/C0119PRDV46/p1713446825373169?thread_ts=1713436300.354359&cid=C0119PRDV46) thread, it is very slow:
<img width="455" alt="Screenshot 2024-04-22 at 5 32 04 AM" src="https://github.com/internetarchive/openlibrary/assets/978325/54575542-e9a8-4452-a12a-5ed262897196">
#### Evidence / Screenshot
#### Relevant URL(s)
https://openlibrary.org/books/OL24938286M/Pacific_Vortex!/edit
### Reproducing the bug
1. Go to a work edit page
2. Try to add a language
* Expected behavior: Fast
* Actual behavior: Slow
### Context
Other keywords: dropdown, pulldown
### Notes from this Issue's Lead
#### Proposal & constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
#### Related files
<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->
#### Stakeholders
<!-- @ tag stakeholders of this bug -->
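One rough way to put a number on the slowness described in the reproduction steps above is to time the endpoint directly (an illustrative sketch; the query, limit and timeout are not from the original report):
```python
import time
import requests

start = time.time()
resp = requests.get("https://openlibrary.org/languages/_autocomplete",
                    params={"q": "eng", "limit": 5}, timeout=30)
print(resp.status_code, f"{time.time() - start:.2f}s", resp.json()[:1])
```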
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openlibrary/plugins/worksearch/autocomplete.py`
Content:
```
1 import itertools
2 import web
3 import json
4
5
6 from infogami.utils import delegate
7 from infogami.utils.view import safeint
8 from openlibrary.core.models import Thing
9 from openlibrary.plugins.upstream import utils
10 from openlibrary.plugins.worksearch.search import get_solr
11 from openlibrary.utils import (
12 find_olid_in_string,
13 olid_to_key,
14 )
15
16
17 def to_json(d):
18 web.header('Content-Type', 'application/json')
19 return delegate.RawText(json.dumps(d))
20
21
22 class autocomplete(delegate.page):
23 path = "/_autocomplete"
24 fq = ['-type:edition']
25 fl = 'key,type,name,title,score'
26 olid_suffix: str | None = None
27 sort: str | None = None
28 query = 'title:"{q}"^2 OR title:({q}*) OR name:"{q}"^2 OR name:({q}*)'
29
30 def db_fetch(self, key: str) -> Thing | None:
31 if thing := web.ctx.site.get(key):
32 return thing.as_fake_solr_record()
33 else:
34 return None
35
36 def doc_wrap(self, doc: dict):
37 """Modify the returned solr document in place."""
38 if 'name' not in doc:
39 doc['name'] = doc.get('title')
40
41 def doc_filter(self, doc: dict) -> bool:
42 """Exclude certain documents"""
43 return True
44
45 def GET(self):
46 return self.direct_get()
47
48 def direct_get(self, fq: list[str] | None = None):
49 i = web.input(q="", limit=5)
50 i.limit = safeint(i.limit, 5)
51
52 solr = get_solr()
53
54 # look for ID in query string here
55 q = solr.escape(i.q).strip()
56 embedded_olid = None
57 if self.olid_suffix:
58 embedded_olid = find_olid_in_string(q, self.olid_suffix)
59
60 if embedded_olid:
61 solr_q = f'key:"{olid_to_key(embedded_olid)}"'
62 else:
63 solr_q = self.query.format(q=q)
64
65 fq = fq or self.fq
66 params = {
67 'q_op': 'AND',
68 'rows': i.limit,
69 **({'fq': fq} if fq else {}),
70 # limit the fields returned for better performance
71 'fl': self.fl,
72 **({'sort': self.sort} if self.sort else {}),
73 }
74
75 data = solr.select(solr_q, **params)
76 docs = data['docs']
77
78 if embedded_olid and not docs:
79 # Grumble! Work not in solr yet. Create a dummy.
80 fake_doc = self.db_fetch(olid_to_key(embedded_olid))
81 if fake_doc:
82 docs = [fake_doc]
83
84 result_docs = []
85 for d in docs:
86 if self.doc_filter(d):
87 self.doc_wrap(d)
88 result_docs.append(d)
89
90 return to_json(result_docs)
91
92
93 class languages_autocomplete(delegate.page):
94 path = "/languages/_autocomplete"
95
96 def GET(self):
97 i = web.input(q="", limit=5)
98 i.limit = safeint(i.limit, 5)
99 return to_json(
100 list(itertools.islice(utils.autocomplete_languages(i.q), i.limit))
101 )
102
103
104 class works_autocomplete(autocomplete):
105 path = "/works/_autocomplete"
106 fq = ['type:work']
107 fl = 'key,title,subtitle,cover_i,first_publish_year,author_name,edition_count'
108 olid_suffix = 'W'
109 query = 'title:"{q}"^2 OR title:({q}*)'
110
111 def doc_filter(self, doc: dict) -> bool:
112 # Exclude orphaned editions from autocomplete results
113 # Note: Do this here instead of with an `fq=key:*W` for performance
114 # reasons.
115 return doc['key'][-1] == 'W'
116
117 def doc_wrap(self, doc: dict):
118 doc['full_title'] = doc['title']
119 if 'subtitle' in doc:
120 doc['full_title'] += ": " + doc['subtitle']
121 doc['name'] = doc.get('title')
122
123
124 class authors_autocomplete(autocomplete):
125 path = "/authors/_autocomplete"
126 fq = ['type:author']
127 fl = 'key,name,alternate_names,birth_date,death_date,work_count,top_work,top_subjects'
128 olid_suffix = 'A'
129 query = 'name:({q}*) OR alternate_names:({q}*) OR name:"{q}"^2 OR alternate_names:"{q}"^2'
130
131 def doc_wrap(self, doc: dict):
132 if 'top_work' in doc:
133 doc['works'] = [doc.pop('top_work')]
134 else:
135 doc['works'] = []
136 doc['subjects'] = doc.pop('top_subjects', [])
137
138
139 class subjects_autocomplete(autocomplete):
140 # can't use /subjects/_autocomplete because the subjects endpoint = /subjects/[^/]+
141 path = "/subjects_autocomplete"
142 fq = ['type:subject']
143 fl = 'key,name,work_count'
144 query = 'name:({q}*)'
145 sort = 'work_count desc'
146
147 def GET(self):
148 i = web.input(type="")
149 fq = self.fq
150 if i.type:
151 fq = fq + [f'subject_type:{i.type}']
152
153 return super().direct_get(fq=fq)
154
155
156 def setup():
157 """Do required setup."""
158 pass
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/openlibrary/plugins/worksearch/autocomplete.py b/openlibrary/plugins/worksearch/autocomplete.py
--- a/openlibrary/plugins/worksearch/autocomplete.py
+++ b/openlibrary/plugins/worksearch/autocomplete.py
@@ -96,6 +96,7 @@
def GET(self):
i = web.input(q="", limit=5)
i.limit = safeint(i.limit, 5)
+ web.header("Cache-Control", "max-age=%d" % (24 * 3600))
return to_json(
list(itertools.islice(utils.autocomplete_languages(i.q), i.limit))
)
|
{"golden_diff": "diff --git a/openlibrary/plugins/worksearch/autocomplete.py b/openlibrary/plugins/worksearch/autocomplete.py\n--- a/openlibrary/plugins/worksearch/autocomplete.py\n+++ b/openlibrary/plugins/worksearch/autocomplete.py\n@@ -96,6 +96,7 @@\n def GET(self):\n i = web.input(q=\"\", limit=5)\n i.limit = safeint(i.limit, 5)\n+ web.header(\"Cache-Control\", \"max-age=%d\" % (24 * 3600))\n return to_json(\n list(itertools.islice(utils.autocomplete_languages(i.q), i.limit))\n )\n", "issue": "Edition Editing: language autocomplete is slow\n### Problem\r\n\r\nAccording to [this](https://internetarchive.slack.com/archives/C0119PRDV46/p1713446825373169?thread_ts=1713436300.354359&cid=C0119PRDV46) thread, it is very slow:\r\n\r\n<img width=\"455\" alt=\"Screenshot 2024-04-22 at 5 32 04 AM\" src=\"https://github.com/internetarchive/openlibrary/assets/978325/54575542-e9a8-4452-a12a-5ed262897196\">\r\n\r\n#### Evidence / Screenshot\r\n\r\n#### Relevant URL(s)\r\nhttps://openlibrary.org/books/OL24938286M/Pacific_Vortex!/edit\r\n\r\n### Reproducing the bug\r\n\r\n1. Go to a work edit page\r\n2. Try to add a language\r\n\r\n* Expected behavior: Fast\r\n* Actual behavior: Slow\r\n\r\n\r\n### Context\r\n\r\nOther keywords: dropdown, pulldown\r\n\r\n### Notes from this Issue's Lead\r\n\r\n#### Proposal & constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\n\r\n#### Related files\r\n<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->\r\n\r\n#### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n\n", "before_files": [{"content": "import itertools\nimport web\nimport json\n\n\nfrom infogami.utils import delegate\nfrom infogami.utils.view import safeint\nfrom openlibrary.core.models import Thing\nfrom openlibrary.plugins.upstream import utils\nfrom openlibrary.plugins.worksearch.search import get_solr\nfrom openlibrary.utils import (\n find_olid_in_string,\n olid_to_key,\n)\n\n\ndef to_json(d):\n web.header('Content-Type', 'application/json')\n return delegate.RawText(json.dumps(d))\n\n\nclass autocomplete(delegate.page):\n path = \"/_autocomplete\"\n fq = ['-type:edition']\n fl = 'key,type,name,title,score'\n olid_suffix: str | None = None\n sort: str | None = None\n query = 'title:\"{q}\"^2 OR title:({q}*) OR name:\"{q}\"^2 OR name:({q}*)'\n\n def db_fetch(self, key: str) -> Thing | None:\n if thing := web.ctx.site.get(key):\n return thing.as_fake_solr_record()\n else:\n return None\n\n def doc_wrap(self, doc: dict):\n \"\"\"Modify the returned solr document in place.\"\"\"\n if 'name' not in doc:\n doc['name'] = doc.get('title')\n\n def doc_filter(self, doc: dict) -> bool:\n \"\"\"Exclude certain documents\"\"\"\n return True\n\n def GET(self):\n return self.direct_get()\n\n def direct_get(self, fq: list[str] | None = None):\n i = web.input(q=\"\", limit=5)\n i.limit = safeint(i.limit, 5)\n\n solr = get_solr()\n\n # look for ID in query string here\n q = solr.escape(i.q).strip()\n embedded_olid = None\n if self.olid_suffix:\n embedded_olid = find_olid_in_string(q, self.olid_suffix)\n\n if embedded_olid:\n solr_q = f'key:\"{olid_to_key(embedded_olid)}\"'\n else:\n solr_q = self.query.format(q=q)\n\n fq = fq or self.fq\n params = {\n 'q_op': 'AND',\n 'rows': i.limit,\n **({'fq': fq} if fq else {}),\n # limit the fields returned for better performance\n 'fl': self.fl,\n **({'sort': self.sort} if 
self.sort else {}),\n }\n\n data = solr.select(solr_q, **params)\n docs = data['docs']\n\n if embedded_olid and not docs:\n # Grumble! Work not in solr yet. Create a dummy.\n fake_doc = self.db_fetch(olid_to_key(embedded_olid))\n if fake_doc:\n docs = [fake_doc]\n\n result_docs = []\n for d in docs:\n if self.doc_filter(d):\n self.doc_wrap(d)\n result_docs.append(d)\n\n return to_json(result_docs)\n\n\nclass languages_autocomplete(delegate.page):\n path = \"/languages/_autocomplete\"\n\n def GET(self):\n i = web.input(q=\"\", limit=5)\n i.limit = safeint(i.limit, 5)\n return to_json(\n list(itertools.islice(utils.autocomplete_languages(i.q), i.limit))\n )\n\n\nclass works_autocomplete(autocomplete):\n path = \"/works/_autocomplete\"\n fq = ['type:work']\n fl = 'key,title,subtitle,cover_i,first_publish_year,author_name,edition_count'\n olid_suffix = 'W'\n query = 'title:\"{q}\"^2 OR title:({q}*)'\n\n def doc_filter(self, doc: dict) -> bool:\n # Exclude orphaned editions from autocomplete results\n # Note: Do this here instead of with an `fq=key:*W` for performance\n # reasons.\n return doc['key'][-1] == 'W'\n\n def doc_wrap(self, doc: dict):\n doc['full_title'] = doc['title']\n if 'subtitle' in doc:\n doc['full_title'] += \": \" + doc['subtitle']\n doc['name'] = doc.get('title')\n\n\nclass authors_autocomplete(autocomplete):\n path = \"/authors/_autocomplete\"\n fq = ['type:author']\n fl = 'key,name,alternate_names,birth_date,death_date,work_count,top_work,top_subjects'\n olid_suffix = 'A'\n query = 'name:({q}*) OR alternate_names:({q}*) OR name:\"{q}\"^2 OR alternate_names:\"{q}\"^2'\n\n def doc_wrap(self, doc: dict):\n if 'top_work' in doc:\n doc['works'] = [doc.pop('top_work')]\n else:\n doc['works'] = []\n doc['subjects'] = doc.pop('top_subjects', [])\n\n\nclass subjects_autocomplete(autocomplete):\n # can't use /subjects/_autocomplete because the subjects endpoint = /subjects/[^/]+\n path = \"/subjects_autocomplete\"\n fq = ['type:subject']\n fl = 'key,name,work_count'\n query = 'name:({q}*)'\n sort = 'work_count desc'\n\n def GET(self):\n i = web.input(type=\"\")\n fq = self.fq\n if i.type:\n fq = fq + [f'subject_type:{i.type}']\n\n return super().direct_get(fq=fq)\n\n\ndef setup():\n \"\"\"Do required setup.\"\"\"\n pass\n", "path": "openlibrary/plugins/worksearch/autocomplete.py"}], "after_files": [{"content": "import itertools\nimport web\nimport json\n\n\nfrom infogami.utils import delegate\nfrom infogami.utils.view import safeint\nfrom openlibrary.core.models import Thing\nfrom openlibrary.plugins.upstream import utils\nfrom openlibrary.plugins.worksearch.search import get_solr\nfrom openlibrary.utils import (\n find_olid_in_string,\n olid_to_key,\n)\n\n\ndef to_json(d):\n web.header('Content-Type', 'application/json')\n return delegate.RawText(json.dumps(d))\n\n\nclass autocomplete(delegate.page):\n path = \"/_autocomplete\"\n fq = ['-type:edition']\n fl = 'key,type,name,title,score'\n olid_suffix: str | None = None\n sort: str | None = None\n query = 'title:\"{q}\"^2 OR title:({q}*) OR name:\"{q}\"^2 OR name:({q}*)'\n\n def db_fetch(self, key: str) -> Thing | None:\n if thing := web.ctx.site.get(key):\n return thing.as_fake_solr_record()\n else:\n return None\n\n def doc_wrap(self, doc: dict):\n \"\"\"Modify the returned solr document in place.\"\"\"\n if 'name' not in doc:\n doc['name'] = doc.get('title')\n\n def doc_filter(self, doc: dict) -> bool:\n \"\"\"Exclude certain documents\"\"\"\n return True\n\n def GET(self):\n return self.direct_get()\n\n def direct_get(self, fq: 
list[str] | None = None):\n i = web.input(q=\"\", limit=5)\n i.limit = safeint(i.limit, 5)\n\n solr = get_solr()\n\n # look for ID in query string here\n q = solr.escape(i.q).strip()\n embedded_olid = None\n if self.olid_suffix:\n embedded_olid = find_olid_in_string(q, self.olid_suffix)\n\n if embedded_olid:\n solr_q = f'key:\"{olid_to_key(embedded_olid)}\"'\n else:\n solr_q = self.query.format(q=q)\n\n fq = fq or self.fq\n params = {\n 'q_op': 'AND',\n 'rows': i.limit,\n **({'fq': fq} if fq else {}),\n # limit the fields returned for better performance\n 'fl': self.fl,\n **({'sort': self.sort} if self.sort else {}),\n }\n\n data = solr.select(solr_q, **params)\n docs = data['docs']\n\n if embedded_olid and not docs:\n # Grumble! Work not in solr yet. Create a dummy.\n fake_doc = self.db_fetch(olid_to_key(embedded_olid))\n if fake_doc:\n docs = [fake_doc]\n\n result_docs = []\n for d in docs:\n if self.doc_filter(d):\n self.doc_wrap(d)\n result_docs.append(d)\n\n return to_json(result_docs)\n\n\nclass languages_autocomplete(delegate.page):\n path = \"/languages/_autocomplete\"\n\n def GET(self):\n i = web.input(q=\"\", limit=5)\n i.limit = safeint(i.limit, 5)\n web.header(\"Cache-Control\", \"max-age=%d\" % (24 * 3600))\n return to_json(\n list(itertools.islice(utils.autocomplete_languages(i.q), i.limit))\n )\n\n\nclass works_autocomplete(autocomplete):\n path = \"/works/_autocomplete\"\n fq = ['type:work']\n fl = 'key,title,subtitle,cover_i,first_publish_year,author_name,edition_count'\n olid_suffix = 'W'\n query = 'title:\"{q}\"^2 OR title:({q}*)'\n\n def doc_filter(self, doc: dict) -> bool:\n # Exclude orphaned editions from autocomplete results\n # Note: Do this here instead of with an `fq=key:*W` for performance\n # reasons.\n return doc['key'][-1] == 'W'\n\n def doc_wrap(self, doc: dict):\n doc['full_title'] = doc['title']\n if 'subtitle' in doc:\n doc['full_title'] += \": \" + doc['subtitle']\n doc['name'] = doc.get('title')\n\n\nclass authors_autocomplete(autocomplete):\n path = \"/authors/_autocomplete\"\n fq = ['type:author']\n fl = 'key,name,alternate_names,birth_date,death_date,work_count,top_work,top_subjects'\n olid_suffix = 'A'\n query = 'name:({q}*) OR alternate_names:({q}*) OR name:\"{q}\"^2 OR alternate_names:\"{q}\"^2'\n\n def doc_wrap(self, doc: dict):\n if 'top_work' in doc:\n doc['works'] = [doc.pop('top_work')]\n else:\n doc['works'] = []\n doc['subjects'] = doc.pop('top_subjects', [])\n\n\nclass subjects_autocomplete(autocomplete):\n # can't use /subjects/_autocomplete because the subjects endpoint = /subjects/[^/]+\n path = \"/subjects_autocomplete\"\n fq = ['type:subject']\n fl = 'key,name,work_count'\n query = 'name:({q}*)'\n sort = 'work_count desc'\n\n def GET(self):\n i = web.input(type=\"\")\n fq = self.fq\n if i.type:\n fq = fq + [f'subject_type:{i.type}']\n\n return super().direct_get(fq=fq)\n\n\ndef setup():\n \"\"\"Do required setup.\"\"\"\n pass\n", "path": "openlibrary/plugins/worksearch/autocomplete.py"}]}
| 2,170 | 132 |
gh_patches_debug_28248
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmpretrain-548
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Focal loss for single label classification?
### Checklist
- I have searched related issues but cannot get the expected help.
- I have read related documents and don't know what to do.
### Describe the question you meet
I'm trying to train a resnet18 with a LinearClsHead using the focal loss for a single-label binary classification problem, but the current focal loss implementation raises an assertion error because the predictions have shape [batch_size, 2] and the target has shape [batch_size]. According to the documentation, the implemented focal loss only works for multilabel tasks. Is there any way of using it for single-label tasks as well? 
### Post related information
1. The output of `pip list | grep "mmcv\|mmcls\|^torch"`
mmcls 0.17.0
mmcv-full 1.3.9
torch 1.9.0
torch-model-archiver 0.4.1
torchvision 0.10.0
2. This is the model config I'm using:
```python
# type: ignore
model = dict(
type="ImageClassifier",
backbone=dict(
type="ResNet", depth=18, num_stages=4, out_indices=(3,), style="pytorch"
),
neck=dict(type="GlobalAveragePooling"),
head=dict(
type="LinearClsHead",
num_classes=2,
in_channels=512,
loss=dict(type="FocalLoss", loss_weight=1.0),
topk=(1),
),
)
load_from = "/media/VA/pretrained_weights/mmcls/resnet18_batch256_20200708-34ab8f90.pth"
```
3. I am getting the following error during training:
```
File "/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcls/apis/train.py", line 164, in train_model
runner.run(data_loaders, cfg.workflow)
File "/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcv/runner/epoch_based_runner.py", line 127, in run
epoch_runner(data_loaders[i], **kwargs)
File "/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcv/runner/epoch_based_runner.py", line 50, in train
self.run_iter(data_batch, train_mode=True, **kwargs)
File "/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcv/runner/epoch_based_runner.py", line 29, in run_iter
outputs = self.model.train_step(data_batch, self.optimizer,
File "/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcv/parallel/data_parallel.py", line 67, in train_step
return self.module.train_step(*inputs[0], **kwargs[0])
File "/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcls/models/classifiers/base.py", line 146, in train_step
losses = self(**data)
File "/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcv/runner/fp16_utils.py", line 98, in new_func
return old_func(*args, **kwargs)
File "/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcls/models/classifiers/base.py", line 90, in forward
return self.forward_train(img, **kwargs)
File "/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcls/models/classifiers/image.py", line 110, in forward_train
loss = self.head.forward_train(x, gt_label)
File "/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcls/models/heads/linear_head.py", line 53, in forward_train
losses = self.loss(cls_score, gt_label)
File "/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcls/models/heads/cls_head.py", line 46, in loss
loss = self.compute_loss(cls_score, gt_label, avg_factor=num_samples)
File "/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcls/models/losses/focal_loss.py", line 106, in forward
loss_cls = self.loss_weight * sigmoid_focal_loss(
File "/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcls/models/losses/focal_loss.py", line 36, in sigmoid_focal_loss
assert pred.shape == \
AssertionError: pred and target should be in the same shape.
```
Thank you!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmcls/models/losses/focal_loss.py`
Content:
```
1 # Copyright (c) OpenMMLab. All rights reserved.
2 import torch.nn as nn
3 import torch.nn.functional as F
4
5 from ..builder import LOSSES
6 from .utils import weight_reduce_loss
7
8
9 def sigmoid_focal_loss(pred,
10 target,
11 weight=None,
12 gamma=2.0,
13 alpha=0.25,
14 reduction='mean',
15 avg_factor=None):
16 r"""Sigmoid focal loss.
17
18 Args:
19 pred (torch.Tensor): The prediction with shape (N, \*).
20 target (torch.Tensor): The ground truth label of the prediction with
21 shape (N, \*).
22 weight (torch.Tensor, optional): Sample-wise loss weight with shape
23 (N, ). Defaults to None.
24 gamma (float): The gamma for calculating the modulating factor.
25 Defaults to 2.0.
26 alpha (float): A balanced form for Focal Loss. Defaults to 0.25.
27 reduction (str): The method used to reduce the loss.
28 Options are "none", "mean" and "sum". If reduction is 'none' ,
29 loss is same shape as pred and label. Defaults to 'mean'.
30 avg_factor (int, optional): Average factor that is used to average
31 the loss. Defaults to None.
32
33 Returns:
34 torch.Tensor: Loss.
35 """
36 assert pred.shape == \
37 target.shape, 'pred and target should be in the same shape.'
38 pred_sigmoid = pred.sigmoid()
39 target = target.type_as(pred)
40 pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
41 focal_weight = (alpha * target + (1 - alpha) *
42 (1 - target)) * pt.pow(gamma)
43 loss = F.binary_cross_entropy_with_logits(
44 pred, target, reduction='none') * focal_weight
45 if weight is not None:
46 assert weight.dim() == 1
47 weight = weight.float()
48 if pred.dim() > 1:
49 weight = weight.reshape(-1, 1)
50 loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
51 return loss
52
53
54 @LOSSES.register_module()
55 class FocalLoss(nn.Module):
56 """Focal loss.
57
58 Args:
59 gamma (float): Focusing parameter in focal loss.
60 Defaults to 2.0.
61 alpha (float): The parameter in balanced form of focal
62 loss. Defaults to 0.25.
63 reduction (str): The method used to reduce the loss into
64 a scalar. Options are "none" and "mean". Defaults to 'mean'.
65 loss_weight (float): Weight of loss. Defaults to 1.0.
66 """
67
68 def __init__(self,
69 gamma=2.0,
70 alpha=0.25,
71 reduction='mean',
72 loss_weight=1.0):
73
74 super(FocalLoss, self).__init__()
75 self.gamma = gamma
76 self.alpha = alpha
77 self.reduction = reduction
78 self.loss_weight = loss_weight
79
80 def forward(self,
81 pred,
82 target,
83 weight=None,
84 avg_factor=None,
85 reduction_override=None):
86 r"""Sigmoid focal loss.
87
88 Args:
89 pred (torch.Tensor): The prediction with shape (N, \*).
90 target (torch.Tensor): The ground truth label of the prediction
91 with shape (N, \*).
92 weight (torch.Tensor, optional): Sample-wise loss weight with shape
93 (N, \*). Defaults to None.
94 avg_factor (int, optional): Average factor that is used to average
95 the loss. Defaults to None.
96 reduction_override (str, optional): The method used to reduce the
97 loss into a scalar. Options are "none", "mean" and "sum".
98 Defaults to None.
99
100 Returns:
101 torch.Tensor: Loss.
102 """
103 assert reduction_override in (None, 'none', 'mean', 'sum')
104 reduction = (
105 reduction_override if reduction_override else self.reduction)
106 loss_cls = self.loss_weight * sigmoid_focal_loss(
107 pred,
108 target,
109 weight,
110 gamma=self.gamma,
111 alpha=self.alpha,
112 reduction=reduction,
113 avg_factor=avg_factor)
114 return loss_cls
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mmcls/models/losses/focal_loss.py b/mmcls/models/losses/focal_loss.py
--- a/mmcls/models/losses/focal_loss.py
+++ b/mmcls/models/losses/focal_loss.py
@@ -3,7 +3,7 @@
import torch.nn.functional as F
from ..builder import LOSSES
-from .utils import weight_reduce_loss
+from .utils import convert_to_one_hot, weight_reduce_loss
def sigmoid_focal_loss(pred,
@@ -88,7 +88,7 @@
Args:
pred (torch.Tensor): The prediction with shape (N, \*).
target (torch.Tensor): The ground truth label of the prediction
- with shape (N, \*).
+ with shape (N, \*), N or (N,1).
weight (torch.Tensor, optional): Sample-wise loss weight with shape
(N, \*). Defaults to None.
avg_factor (int, optional): Average factor that is used to average
@@ -103,6 +103,8 @@
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
+ if target.dim() == 1 or (target.dim() == 2 and target.shape[1] == 1):
+ target = convert_to_one_hot(target.view(-1, 1), pred.shape[-1])
loss_cls = self.loss_weight * sigmoid_focal_loss(
pred,
target,
|
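The patch above fixes the shape mismatch by expanding hard labels to one-hot form before calling `sigmoid_focal_loss`. A minimal sketch of that step, using `torch.nn.functional.one_hot` as a stand-in for mmcls's internal `convert_to_one_hot` helper (tensor values are invented for illustration):
```python
import torch
import torch.nn.functional as F

pred = torch.randn(4, 2)             # LinearClsHead logits, shape (N, num_classes)
target = torch.tensor([0, 1, 1, 0])  # single-label targets, shape (N,)

# Expand the hard labels to an (N, num_classes) one-hot matrix so they match
# pred's shape; sigmoid_focal_loss's shape assertion then passes.
one_hot = F.one_hot(target.view(-1), num_classes=pred.shape[-1]).float()
print(one_hot.shape)                 # torch.Size([4, 2])
```
Because the conversion only triggers when the target is 1-D or of shape (N, 1), existing multi-label one-hot targets keep working unchanged.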
{"golden_diff": "diff --git a/mmcls/models/losses/focal_loss.py b/mmcls/models/losses/focal_loss.py\n--- a/mmcls/models/losses/focal_loss.py\n+++ b/mmcls/models/losses/focal_loss.py\n@@ -3,7 +3,7 @@\n import torch.nn.functional as F\n \n from ..builder import LOSSES\n-from .utils import weight_reduce_loss\n+from .utils import convert_to_one_hot, weight_reduce_loss\n \n \n def sigmoid_focal_loss(pred,\n@@ -88,7 +88,7 @@\n Args:\n pred (torch.Tensor): The prediction with shape (N, \\*).\n target (torch.Tensor): The ground truth label of the prediction\n- with shape (N, \\*).\n+ with shape (N, \\*), N or (N,1).\n weight (torch.Tensor, optional): Sample-wise loss weight with shape\n (N, \\*). Defaults to None.\n avg_factor (int, optional): Average factor that is used to average\n@@ -103,6 +103,8 @@\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = (\n reduction_override if reduction_override else self.reduction)\n+ if target.dim() == 1 or (target.dim() == 2 and target.shape[1] == 1):\n+ target = convert_to_one_hot(target.view(-1, 1), pred.shape[-1])\n loss_cls = self.loss_weight * sigmoid_focal_loss(\n pred,\n target,\n", "issue": "Focal loss for single label classification?\n### Checklist\r\n- I have searched related issues but cannot get the expected help.\r\n- I have read related documents and don't know what to do.\r\n\r\n### Describe the question you meet\r\n\r\nI'm trying to train a resnet18 with a LinearClsHead using the focal loss for a single label binary classification problem, but the current focal loss implementation raises an assertion error as the predictions have shape [batch_size, 2] and target has shape [batch_size]. According to the documentation the implemented focal loss only works for multilabel tasks. Is there any way of using it also for single label tasks? \r\n\r\n### Post related information\r\n1. The output of `pip list | grep \"mmcv\\|mmcls\\|^torch\"`\r\nmmcls 0.17.0\r\nmmcv-full 1.3.9\r\ntorch 1.9.0\r\ntorch-model-archiver 0.4.1\r\ntorchvision 0.10.0\r\n\r\n2. This is the model config I'm using:\r\n```python\r\n# type: ignore\r\nmodel = dict(\r\n type=\"ImageClassifier\",\r\n backbone=dict(\r\n type=\"ResNet\", depth=18, num_stages=4, out_indices=(3,), style=\"pytorch\"\r\n ),\r\n neck=dict(type=\"GlobalAveragePooling\"),\r\n head=dict(\r\n type=\"LinearClsHead\",\r\n num_classes=2,\r\n in_channels=512,\r\n loss=dict(type=\"FocalLoss\", loss_weight=1.0),\r\n topk=(1),\r\n ),\r\n)\r\n\r\nload_from = \"/media/VA/pretrained_weights/mmcls/resnet18_batch256_20200708-34ab8f90.pth\"\r\n```\r\n3. 
I am getting the following error during training:\r\n```\r\nFile \"/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcls/apis/train.py\", line 164, in train_model\r\n runner.run(data_loaders, cfg.workflow)\r\n File \"/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcv/runner/epoch_based_runner.py\", line 127, in run\r\n epoch_runner(data_loaders[i], **kwargs)\r\n File \"/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcv/runner/epoch_based_runner.py\", line 50, in train\r\n self.run_iter(data_batch, train_mode=True, **kwargs)\r\n File \"/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcv/runner/epoch_based_runner.py\", line 29, in run_iter\r\n outputs = self.model.train_step(data_batch, self.optimizer,\r\n File \"/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcv/parallel/data_parallel.py\", line 67, in train_step\r\n return self.module.train_step(*inputs[0], **kwargs[0])\r\n File \"/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcls/models/classifiers/base.py\", line 146, in train_step\r\n losses = self(**data)\r\n File \"/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/torch/nn/modules/module.py\", line 1051, in _call_impl\r\n return forward_call(*input, **kwargs)\r\n File \"/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcv/runner/fp16_utils.py\", line 98, in new_func\r\n return old_func(*args, **kwargs)\r\n File \"/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcls/models/classifiers/base.py\", line 90, in forward\r\n return self.forward_train(img, **kwargs)\r\n File \"/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcls/models/classifiers/image.py\", line 110, in forward_train\r\n loss = self.head.forward_train(x, gt_label)\r\n File \"/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcls/models/heads/linear_head.py\", line 53, in forward_train\r\n losses = self.loss(cls_score, gt_label)\r\n File \"/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcls/models/heads/cls_head.py\", line 46, in loss\r\n loss = self.compute_loss(cls_score, gt_label, avg_factor=num_samples)\r\n File \"/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/torch/nn/modules/module.py\", line 1051, in _call_impl\r\n return forward_call(*input, **kwargs)\r\n File \"/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcls/models/losses/focal_loss.py\", line 106, in forward\r\n loss_cls = self.loss_weight * sigmoid_focal_loss(\r\n File \"/media/data/miniconda3/envs/ai-project-screen_classification/lib/python3.8/site-packages/mmcls/models/losses/focal_loss.py\", line 36, in sigmoid_focal_loss\r\n assert pred.shape == \\\r\nAssertionError: pred and target should be in the same shape.\r\n```\r\nThank you!\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. 
All rights reserved.\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..builder import LOSSES\nfrom .utils import weight_reduce_loss\n\n\ndef sigmoid_focal_loss(pred,\n target,\n weight=None,\n gamma=2.0,\n alpha=0.25,\n reduction='mean',\n avg_factor=None):\n r\"\"\"Sigmoid focal loss.\n\n Args:\n pred (torch.Tensor): The prediction with shape (N, \\*).\n target (torch.Tensor): The ground truth label of the prediction with\n shape (N, \\*).\n weight (torch.Tensor, optional): Sample-wise loss weight with shape\n (N, ). Defaults to None.\n gamma (float): The gamma for calculating the modulating factor.\n Defaults to 2.0.\n alpha (float): A balanced form for Focal Loss. Defaults to 0.25.\n reduction (str): The method used to reduce the loss.\n Options are \"none\", \"mean\" and \"sum\". If reduction is 'none' ,\n loss is same shape as pred and label. Defaults to 'mean'.\n avg_factor (int, optional): Average factor that is used to average\n the loss. Defaults to None.\n\n Returns:\n torch.Tensor: Loss.\n \"\"\"\n assert pred.shape == \\\n target.shape, 'pred and target should be in the same shape.'\n pred_sigmoid = pred.sigmoid()\n target = target.type_as(pred)\n pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)\n focal_weight = (alpha * target + (1 - alpha) *\n (1 - target)) * pt.pow(gamma)\n loss = F.binary_cross_entropy_with_logits(\n pred, target, reduction='none') * focal_weight\n if weight is not None:\n assert weight.dim() == 1\n weight = weight.float()\n if pred.dim() > 1:\n weight = weight.reshape(-1, 1)\n loss = weight_reduce_loss(loss, weight, reduction, avg_factor)\n return loss\n\n\[email protected]_module()\nclass FocalLoss(nn.Module):\n \"\"\"Focal loss.\n\n Args:\n gamma (float): Focusing parameter in focal loss.\n Defaults to 2.0.\n alpha (float): The parameter in balanced form of focal\n loss. Defaults to 0.25.\n reduction (str): The method used to reduce the loss into\n a scalar. Options are \"none\" and \"mean\". Defaults to 'mean'.\n loss_weight (float): Weight of loss. Defaults to 1.0.\n \"\"\"\n\n def __init__(self,\n gamma=2.0,\n alpha=0.25,\n reduction='mean',\n loss_weight=1.0):\n\n super(FocalLoss, self).__init__()\n self.gamma = gamma\n self.alpha = alpha\n self.reduction = reduction\n self.loss_weight = loss_weight\n\n def forward(self,\n pred,\n target,\n weight=None,\n avg_factor=None,\n reduction_override=None):\n r\"\"\"Sigmoid focal loss.\n\n Args:\n pred (torch.Tensor): The prediction with shape (N, \\*).\n target (torch.Tensor): The ground truth label of the prediction\n with shape (N, \\*).\n weight (torch.Tensor, optional): Sample-wise loss weight with shape\n (N, \\*). Defaults to None.\n avg_factor (int, optional): Average factor that is used to average\n the loss. Defaults to None.\n reduction_override (str, optional): The method used to reduce the\n loss into a scalar. Options are \"none\", \"mean\" and \"sum\".\n Defaults to None.\n\n Returns:\n torch.Tensor: Loss.\n \"\"\"\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = (\n reduction_override if reduction_override else self.reduction)\n loss_cls = self.loss_weight * sigmoid_focal_loss(\n pred,\n target,\n weight,\n gamma=self.gamma,\n alpha=self.alpha,\n reduction=reduction,\n avg_factor=avg_factor)\n return loss_cls\n", "path": "mmcls/models/losses/focal_loss.py"}], "after_files": [{"content": "# Copyright (c) OpenMMLab. 
All rights reserved.\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..builder import LOSSES\nfrom .utils import convert_to_one_hot, weight_reduce_loss\n\n\ndef sigmoid_focal_loss(pred,\n target,\n weight=None,\n gamma=2.0,\n alpha=0.25,\n reduction='mean',\n avg_factor=None):\n r\"\"\"Sigmoid focal loss.\n\n Args:\n pred (torch.Tensor): The prediction with shape (N, \\*).\n target (torch.Tensor): The ground truth label of the prediction with\n shape (N, \\*).\n weight (torch.Tensor, optional): Sample-wise loss weight with shape\n (N, ). Defaults to None.\n gamma (float): The gamma for calculating the modulating factor.\n Defaults to 2.0.\n alpha (float): A balanced form for Focal Loss. Defaults to 0.25.\n reduction (str): The method used to reduce the loss.\n Options are \"none\", \"mean\" and \"sum\". If reduction is 'none' ,\n loss is same shape as pred and label. Defaults to 'mean'.\n avg_factor (int, optional): Average factor that is used to average\n the loss. Defaults to None.\n\n Returns:\n torch.Tensor: Loss.\n \"\"\"\n assert pred.shape == \\\n target.shape, 'pred and target should be in the same shape.'\n pred_sigmoid = pred.sigmoid()\n target = target.type_as(pred)\n pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)\n focal_weight = (alpha * target + (1 - alpha) *\n (1 - target)) * pt.pow(gamma)\n loss = F.binary_cross_entropy_with_logits(\n pred, target, reduction='none') * focal_weight\n if weight is not None:\n assert weight.dim() == 1\n weight = weight.float()\n if pred.dim() > 1:\n weight = weight.reshape(-1, 1)\n loss = weight_reduce_loss(loss, weight, reduction, avg_factor)\n return loss\n\n\[email protected]_module()\nclass FocalLoss(nn.Module):\n \"\"\"Focal loss.\n\n Args:\n gamma (float): Focusing parameter in focal loss.\n Defaults to 2.0.\n alpha (float): The parameter in balanced form of focal\n loss. Defaults to 0.25.\n reduction (str): The method used to reduce the loss into\n a scalar. Options are \"none\" and \"mean\". Defaults to 'mean'.\n loss_weight (float): Weight of loss. Defaults to 1.0.\n \"\"\"\n\n def __init__(self,\n gamma=2.0,\n alpha=0.25,\n reduction='mean',\n loss_weight=1.0):\n\n super(FocalLoss, self).__init__()\n self.gamma = gamma\n self.alpha = alpha\n self.reduction = reduction\n self.loss_weight = loss_weight\n\n def forward(self,\n pred,\n target,\n weight=None,\n avg_factor=None,\n reduction_override=None):\n r\"\"\"Sigmoid focal loss.\n\n Args:\n pred (torch.Tensor): The prediction with shape (N, \\*).\n target (torch.Tensor): The ground truth label of the prediction\n with shape (N, \\*), N or (N,1).\n weight (torch.Tensor, optional): Sample-wise loss weight with shape\n (N, \\*). Defaults to None.\n avg_factor (int, optional): Average factor that is used to average\n the loss. Defaults to None.\n reduction_override (str, optional): The method used to reduce the\n loss into a scalar. Options are \"none\", \"mean\" and \"sum\".\n Defaults to None.\n\n Returns:\n torch.Tensor: Loss.\n \"\"\"\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = (\n reduction_override if reduction_override else self.reduction)\n if target.dim() == 1 or (target.dim() == 2 and target.shape[1] == 1):\n target = convert_to_one_hot(target.view(-1, 1), pred.shape[-1])\n loss_cls = self.loss_weight * sigmoid_focal_loss(\n pred,\n target,\n weight,\n gamma=self.gamma,\n alpha=self.alpha,\n reduction=reduction,\n avg_factor=avg_factor)\n return loss_cls\n", "path": "mmcls/models/losses/focal_loss.py"}]}
| 2,615 | 333 |
gh_patches_debug_3209
|
rasdani/github-patches
|
git_diff
|
twisted__twisted-452
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
flattenEvent() fails with "'ascii' codec can't encode character" in Python 2.7
|[<img alt="znerol's avatar" src="https://avatars.githubusercontent.com/u/23288?s=50" width="50" height="50">](https://github.com/znerol)| @znerol reported|
|-|-|
|Trac ID|trac#8699|
|Type|defect|
|Created|2016-07-28 05:59:35Z|
backtrace ends in _flatten.py
```
twisted/logger/_flatten.py(119)flattenEvent()
-> flattenedValue = conversionFunction(fieldValue)
(Pdb) conversionFunction
<type 'str'>
```
I guess conversionFunction should be unicode in Python 2.7 and str in Python 3.
<details><summary>Searchable metadata</summary>
```
trac-id__8699 8699
type__defect defect
reporter__znerol znerol
priority__normal normal
milestone__None None
branch__
branch_author__
status__closed closed
resolution__fixed fixed
component__core core
keywords__None None
time__1469685575533160 1469685575533160
changetime__1470085969851774 1470085969851774
version__None None
owner__Craig_Rodrigues__rodrigc_____ Craig Rodrigues <rodrigc@...>
```
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `twisted/logger/_flatten.py`
Content:
```
1 # -*- test-case-name: twisted.logger.test.test_flatten -*-
2 # Copyright (c) Twisted Matrix Laboratories.
3 # See LICENSE for details.
4
5 """
6 Code related to "flattening" events; that is, extracting a description of all
7 relevant fields from the format string and persisting them for later
8 examination.
9 """
10
11 from string import Formatter
12 from collections import defaultdict
13
14 from twisted.python.compat import unicode
15
16 aFormatter = Formatter()
17
18
19
20 class KeyFlattener(object):
21 """
22 A L{KeyFlattener} computes keys for the things within curly braces in
23 PEP-3101-style format strings as parsed by L{string.Formatter.parse}.
24 """
25
26 def __init__(self):
27 """
28 Initialize a L{KeyFlattener}.
29 """
30 self.keys = defaultdict(lambda: 0)
31
32
33 def flatKey(self, fieldName, formatSpec, conversion):
34 """
35 Compute a string key for a given field/format/conversion.
36
37 @param fieldName: A format field name.
38 @type fieldName: L{str}
39
40 @param formatSpec: A format spec.
41 @type formatSpec: L{str}
42
43 @param conversion: A format field conversion type.
44 @type conversion: L{str}
45
46 @return: A key specific to the given field, format and conversion, as
47 well as the occurrence of that combination within this
48 L{KeyFlattener}'s lifetime.
49 @rtype: L{str}
50 """
51 result = (
52 "{fieldName}!{conversion}:{formatSpec}"
53 .format(
54 fieldName=fieldName,
55 formatSpec=(formatSpec or ""),
56 conversion=(conversion or ""),
57 )
58 )
59 self.keys[result] += 1
60 n = self.keys[result]
61 if n != 1:
62 result += "/" + str(self.keys[result])
63 return result
64
65
66
67 def flattenEvent(event):
68 """
69 Flatten the given event by pre-associating format fields with specific
70 objects and callable results in a L{dict} put into the C{"log_flattened"}
71 key in the event.
72
73 @param event: A logging event.
74 @type event: L{dict}
75 """
76 if "log_format" not in event:
77 return
78
79 if "log_flattened" in event:
80 fields = event["log_flattened"]
81 else:
82 fields = {}
83
84 keyFlattener = KeyFlattener()
85
86 for (literalText, fieldName, formatSpec, conversion) in (
87 aFormatter.parse(event["log_format"])
88 ):
89 if fieldName is None:
90 continue
91
92 if conversion != "r":
93 conversion = "s"
94
95 flattenedKey = keyFlattener.flatKey(fieldName, formatSpec, conversion)
96 structuredKey = keyFlattener.flatKey(fieldName, formatSpec, "")
97
98 if flattenedKey in fields:
99 # We've already seen and handled this key
100 continue
101
102 if fieldName.endswith(u"()"):
103 fieldName = fieldName[:-2]
104 callit = True
105 else:
106 callit = False
107
108 field = aFormatter.get_field(fieldName, (), event)
109 fieldValue = field[0]
110
111 if conversion == "r":
112 conversionFunction = repr
113 else: # Above: if conversion is not "r", it's "s"
114 conversionFunction = str
115
116 if callit:
117 fieldValue = fieldValue()
118
119 flattenedValue = conversionFunction(fieldValue)
120 fields[flattenedKey] = flattenedValue
121 fields[structuredKey] = fieldValue
122
123 if fields:
124 event["log_flattened"] = fields
125
126
127
128 def extractField(field, event):
129 """
130 Extract a given format field from the given event.
131
132 @param field: A string describing a format field or log key. This is the
133 text that would normally fall between a pair of curly braces in a
134 format string: for example, C{"key[2].attribute"}. If a conversion is
135 specified (the thing after the C{"!"} character in a format field) then
136 the result will always be L{unicode}.
137 @type field: L{str} (native string)
138
139 @param event: A log event.
140 @type event: L{dict}
141
142 @return: A value extracted from the field.
143 @rtype: L{object}
144
145 @raise KeyError: if the field is not found in the given event.
146 """
147 keyFlattener = KeyFlattener()
148 [[literalText, fieldName, formatSpec, conversion]] = aFormatter.parse(
149 "{" + field + "}"
150 )
151 key = keyFlattener.flatKey(fieldName, formatSpec, conversion)
152 if "log_flattened" not in event:
153 flattenEvent(event)
154 return event["log_flattened"][key]
155
156
157
158 def flatFormat(event):
159 """
160 Format an event which has been flattened with L{flattenEvent}.
161
162 @param event: A logging event.
163 @type event: L{dict}
164
165 @return: A formatted string.
166 @rtype: L{unicode}
167 """
168 fieldValues = event["log_flattened"]
169 s = []
170 keyFlattener = KeyFlattener()
171 formatFields = aFormatter.parse(event["log_format"])
172 for literalText, fieldName, formatSpec, conversion in formatFields:
173 s.append(literalText)
174 if fieldName is not None:
175 key = keyFlattener.flatKey(
176 fieldName, formatSpec, conversion or "s")
177 s.append(unicode(fieldValues[key]))
178 return u"".join(s)
179
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/twisted/logger/_flatten.py b/twisted/logger/_flatten.py
--- a/twisted/logger/_flatten.py
+++ b/twisted/logger/_flatten.py
@@ -111,7 +111,7 @@
if conversion == "r":
conversionFunction = repr
else: # Above: if conversion is not "r", it's "s"
- conversionFunction = str
+ conversionFunction = unicode
if callit:
fieldValue = fieldValue()
|
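A short sketch of why the one-line change above resolves the encode error: on Python 2, `str()` implicitly encodes with the ascii codec, while `unicode` (re-exported by `twisted.python.compat`, and simply `str` on Python 3) returns the text untouched. The sample value is invented:
```python
# -*- coding: utf-8 -*-
from twisted.python.compat import unicode

field_value = u'r\xe9sum\xe9'     # any non-ASCII log field value

flattened = unicode(field_value)  # works on Python 2 and 3
# str(field_value)                # Py2: UnicodeEncodeError: 'ascii' codec
#                                 # can't encode character ...
```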
{"golden_diff": "diff --git a/twisted/logger/_flatten.py b/twisted/logger/_flatten.py\n--- a/twisted/logger/_flatten.py\n+++ b/twisted/logger/_flatten.py\n@@ -111,7 +111,7 @@\n if conversion == \"r\":\n conversionFunction = repr\n else: # Above: if conversion is not \"r\", it's \"s\"\n- conversionFunction = str\n+ conversionFunction = unicode\n \n if callit:\n fieldValue = fieldValue()\n", "issue": "flattenEvent() fails with \"'ascii' codec can't encode character\" in Python 2.7\n|[<img alt=\"znerol's avatar\" src=\"https://avatars.githubusercontent.com/u/23288?s=50\" width=\"50\" height=\"50\">](https://github.com/znerol)| @znerol reported|\n|-|-|\n|Trac ID|trac#8699|\n|Type|defect|\n|Created|2016-07-28 05:59:35Z|\n\nbacktrace ends in _flatten.py\n\n```\ntwisted/logger/_flatten.py(119)flattenEvent()\n-> flattenedValue = conversionFunction(fieldValue)\n(Pdb) conversionFunction\n<type 'str'>\n```\n\nI guess conversionFunction should be unicode in Python 2.7 and str in Python 3.\n\n<details><summary>Searchable metadata</summary>\n\n```\ntrac-id__8699 8699\ntype__defect defect\nreporter__znerol znerol\npriority__normal normal\nmilestone__None None\nbranch__ \nbranch_author__ \nstatus__closed closed\nresolution__fixed fixed\ncomponent__core core\nkeywords__None None\ntime__1469685575533160 1469685575533160\nchangetime__1470085969851774 1470085969851774\nversion__None None\nowner__Craig_Rodrigues__rodrigc_____ Craig Rodrigues <rodrigc@...>\n\n```\n</details>\n\n", "before_files": [{"content": "# -*- test-case-name: twisted.logger.test.test_flatten -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nCode related to \"flattening\" events; that is, extracting a description of all\nrelevant fields from the format string and persisting them for later\nexamination.\n\"\"\"\n\nfrom string import Formatter\nfrom collections import defaultdict\n\nfrom twisted.python.compat import unicode\n\naFormatter = Formatter()\n\n\n\nclass KeyFlattener(object):\n \"\"\"\n A L{KeyFlattener} computes keys for the things within curly braces in\n PEP-3101-style format strings as parsed by L{string.Formatter.parse}.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize a L{KeyFlattener}.\n \"\"\"\n self.keys = defaultdict(lambda: 0)\n\n\n def flatKey(self, fieldName, formatSpec, conversion):\n \"\"\"\n Compute a string key for a given field/format/conversion.\n\n @param fieldName: A format field name.\n @type fieldName: L{str}\n\n @param formatSpec: A format spec.\n @type formatSpec: L{str}\n\n @param conversion: A format field conversion type.\n @type conversion: L{str}\n\n @return: A key specific to the given field, format and conversion, as\n well as the occurrence of that combination within this\n L{KeyFlattener}'s lifetime.\n @rtype: L{str}\n \"\"\"\n result = (\n \"{fieldName}!{conversion}:{formatSpec}\"\n .format(\n fieldName=fieldName,\n formatSpec=(formatSpec or \"\"),\n conversion=(conversion or \"\"),\n )\n )\n self.keys[result] += 1\n n = self.keys[result]\n if n != 1:\n result += \"/\" + str(self.keys[result])\n return result\n\n\n\ndef flattenEvent(event):\n \"\"\"\n Flatten the given event by pre-associating format fields with specific\n objects and callable results in a L{dict} put into the C{\"log_flattened\"}\n key in the event.\n\n @param event: A logging event.\n @type event: L{dict}\n \"\"\"\n if \"log_format\" not in event:\n return\n\n if \"log_flattened\" in event:\n fields = event[\"log_flattened\"]\n else:\n fields = {}\n\n keyFlattener = KeyFlattener()\n\n 
for (literalText, fieldName, formatSpec, conversion) in (\n aFormatter.parse(event[\"log_format\"])\n ):\n if fieldName is None:\n continue\n\n if conversion != \"r\":\n conversion = \"s\"\n\n flattenedKey = keyFlattener.flatKey(fieldName, formatSpec, conversion)\n structuredKey = keyFlattener.flatKey(fieldName, formatSpec, \"\")\n\n if flattenedKey in fields:\n # We've already seen and handled this key\n continue\n\n if fieldName.endswith(u\"()\"):\n fieldName = fieldName[:-2]\n callit = True\n else:\n callit = False\n\n field = aFormatter.get_field(fieldName, (), event)\n fieldValue = field[0]\n\n if conversion == \"r\":\n conversionFunction = repr\n else: # Above: if conversion is not \"r\", it's \"s\"\n conversionFunction = str\n\n if callit:\n fieldValue = fieldValue()\n\n flattenedValue = conversionFunction(fieldValue)\n fields[flattenedKey] = flattenedValue\n fields[structuredKey] = fieldValue\n\n if fields:\n event[\"log_flattened\"] = fields\n\n\n\ndef extractField(field, event):\n \"\"\"\n Extract a given format field from the given event.\n\n @param field: A string describing a format field or log key. This is the\n text that would normally fall between a pair of curly braces in a\n format string: for example, C{\"key[2].attribute\"}. If a conversion is\n specified (the thing after the C{\"!\"} character in a format field) then\n the result will always be L{unicode}.\n @type field: L{str} (native string)\n\n @param event: A log event.\n @type event: L{dict}\n\n @return: A value extracted from the field.\n @rtype: L{object}\n\n @raise KeyError: if the field is not found in the given event.\n \"\"\"\n keyFlattener = KeyFlattener()\n [[literalText, fieldName, formatSpec, conversion]] = aFormatter.parse(\n \"{\" + field + \"}\"\n )\n key = keyFlattener.flatKey(fieldName, formatSpec, conversion)\n if \"log_flattened\" not in event:\n flattenEvent(event)\n return event[\"log_flattened\"][key]\n\n\n\ndef flatFormat(event):\n \"\"\"\n Format an event which has been flattened with L{flattenEvent}.\n\n @param event: A logging event.\n @type event: L{dict}\n\n @return: A formatted string.\n @rtype: L{unicode}\n \"\"\"\n fieldValues = event[\"log_flattened\"]\n s = []\n keyFlattener = KeyFlattener()\n formatFields = aFormatter.parse(event[\"log_format\"])\n for literalText, fieldName, formatSpec, conversion in formatFields:\n s.append(literalText)\n if fieldName is not None:\n key = keyFlattener.flatKey(\n fieldName, formatSpec, conversion or \"s\")\n s.append(unicode(fieldValues[key]))\n return u\"\".join(s)\n", "path": "twisted/logger/_flatten.py"}], "after_files": [{"content": "# -*- test-case-name: twisted.logger.test.test_flatten -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nCode related to \"flattening\" events; that is, extracting a description of all\nrelevant fields from the format string and persisting them for later\nexamination.\n\"\"\"\n\nfrom string import Formatter\nfrom collections import defaultdict\n\nfrom twisted.python.compat import unicode\n\naFormatter = Formatter()\n\n\n\nclass KeyFlattener(object):\n \"\"\"\n A L{KeyFlattener} computes keys for the things within curly braces in\n PEP-3101-style format strings as parsed by L{string.Formatter.parse}.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize a L{KeyFlattener}.\n \"\"\"\n self.keys = defaultdict(lambda: 0)\n\n\n def flatKey(self, fieldName, formatSpec, conversion):\n \"\"\"\n Compute a string key for a given field/format/conversion.\n\n @param fieldName: A format 
field name.\n @type fieldName: L{str}\n\n @param formatSpec: A format spec.\n @type formatSpec: L{str}\n\n @param conversion: A format field conversion type.\n @type conversion: L{str}\n\n @return: A key specific to the given field, format and conversion, as\n well as the occurrence of that combination within this\n L{KeyFlattener}'s lifetime.\n @rtype: L{str}\n \"\"\"\n result = (\n \"{fieldName}!{conversion}:{formatSpec}\"\n .format(\n fieldName=fieldName,\n formatSpec=(formatSpec or \"\"),\n conversion=(conversion or \"\"),\n )\n )\n self.keys[result] += 1\n n = self.keys[result]\n if n != 1:\n result += \"/\" + str(self.keys[result])\n return result\n\n\n\ndef flattenEvent(event):\n \"\"\"\n Flatten the given event by pre-associating format fields with specific\n objects and callable results in a L{dict} put into the C{\"log_flattened\"}\n key in the event.\n\n @param event: A logging event.\n @type event: L{dict}\n \"\"\"\n if \"log_format\" not in event:\n return\n\n if \"log_flattened\" in event:\n fields = event[\"log_flattened\"]\n else:\n fields = {}\n\n keyFlattener = KeyFlattener()\n\n for (literalText, fieldName, formatSpec, conversion) in (\n aFormatter.parse(event[\"log_format\"])\n ):\n if fieldName is None:\n continue\n\n if conversion != \"r\":\n conversion = \"s\"\n\n flattenedKey = keyFlattener.flatKey(fieldName, formatSpec, conversion)\n structuredKey = keyFlattener.flatKey(fieldName, formatSpec, \"\")\n\n if flattenedKey in fields:\n # We've already seen and handled this key\n continue\n\n if fieldName.endswith(u\"()\"):\n fieldName = fieldName[:-2]\n callit = True\n else:\n callit = False\n\n field = aFormatter.get_field(fieldName, (), event)\n fieldValue = field[0]\n\n if conversion == \"r\":\n conversionFunction = repr\n else: # Above: if conversion is not \"r\", it's \"s\"\n conversionFunction = unicode\n\n if callit:\n fieldValue = fieldValue()\n\n flattenedValue = conversionFunction(fieldValue)\n fields[flattenedKey] = flattenedValue\n fields[structuredKey] = fieldValue\n\n if fields:\n event[\"log_flattened\"] = fields\n\n\n\ndef extractField(field, event):\n \"\"\"\n Extract a given format field from the given event.\n\n @param field: A string describing a format field or log key. This is the\n text that would normally fall between a pair of curly braces in a\n format string: for example, C{\"key[2].attribute\"}. 
If a conversion is\n specified (the thing after the C{\"!\"} character in a format field) then\n the result will always be L{unicode}.\n @type field: L{str} (native string)\n\n @param event: A log event.\n @type event: L{dict}\n\n @return: A value extracted from the field.\n @rtype: L{object}\n\n @raise KeyError: if the field is not found in the given event.\n \"\"\"\n keyFlattener = KeyFlattener()\n [[literalText, fieldName, formatSpec, conversion]] = aFormatter.parse(\n \"{\" + field + \"}\"\n )\n key = keyFlattener.flatKey(fieldName, formatSpec, conversion)\n if \"log_flattened\" not in event:\n flattenEvent(event)\n return event[\"log_flattened\"][key]\n\n\n\ndef flatFormat(event):\n \"\"\"\n Format an event which has been flattened with L{flattenEvent}.\n\n @param event: A logging event.\n @type event: L{dict}\n\n @return: A formatted string.\n @rtype: L{unicode}\n \"\"\"\n fieldValues = event[\"log_flattened\"]\n s = []\n keyFlattener = KeyFlattener()\n formatFields = aFormatter.parse(event[\"log_format\"])\n for literalText, fieldName, formatSpec, conversion in formatFields:\n s.append(literalText)\n if fieldName is not None:\n key = keyFlattener.flatKey(\n fieldName, formatSpec, conversion or \"s\")\n s.append(unicode(fieldValues[key]))\n return u\"\".join(s)\n", "path": "twisted/logger/_flatten.py"}]}
| 2,273 | 110 |
gh_patches_debug_57077
|
rasdani/github-patches
|
git_diff
|
canonical__cloud-init-5343
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cloud-init generates a traceback if a default route already exists during ephemeral network setup
This bug was originally filed in Launchpad as [LP: #1860164](https://bugs.launchpad.net/cloud-init/+bug/1860164)
<details>
<summary>Launchpad details</summary>
<pre>
affected_projects = []
assignee = None
assignee_name = None
date_closed = None
date_created = 2020-01-17T18:37:30.886100+00:00
date_fix_committed = None
date_fix_released = None
id = 1860164
importance = medium
is_complete = False
lp_url = https://bugs.launchpad.net/cloud-init/+bug/1860164
milestone = None
owner = rjschwei
owner_name = Robert Schweikert
private = False
status = triaged
submitter = rjschwei
submitter_name = Robert Schweikert
tags = []
duplicates = []
</pre>
</details>
_Launchpad user **Robert Schweikert(rjschwei)** wrote on 2020-01-17T18:37:30.886100+00:00_
If a route already exists when the ephemeral network is brought up, cloud-init will generate the following traceback:
2020-01-16 21:14:22,584 - util.py[DEBUG]: Getting data from <class 'cloudinit.sources.DataSourceOracle.DataSourceOracle'> failed
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/cloudinit/sources/__init__.py", line 760, in find_source
if s.update_metadata([EventType.BOOT_NEW_INSTANCE]):
File "/usr/lib/python2.7/site-packages/cloudinit/sources/__init__.py", line 649, in update_metadata
result = self.get_data()
File "/usr/lib/python2.7/site-packages/cloudinit/sources/__init__.py", line 273, in get_data
return_value = self._get_data()
File "/usr/lib/python2.7/site-packages/cloudinit/sources/DataSourceOracle.py", line 195, in _get_data
with dhcp.EphemeralDHCPv4(net.find_fallback_nic()):
File "/usr/lib/python2.7/site-packages/cloudinit/net/dhcp.py", line 57, in __enter__
return self.obtain_lease()
File "/usr/lib/python2.7/site-packages/cloudinit/net/dhcp.py", line 109, in obtain_lease
ephipv4.__enter__()
File "/usr/lib/python2.7/site-packages/cloudinit/net/__init__.py", line 920, in __enter__
self._bringup_static_routes()
File "/usr/lib/python2.7/site-packages/cloudinit/net/__init__.py", line 974, in _bringup_static_routes
['dev', self.interface], capture=True)
File "/usr/lib/python2.7/site-packages/cloudinit/util.py", line 2083, in subp
cmd=args)
ProcessExecutionError: Unexpected error while running command.
This is a regression from 19.1 on SUSE, where existing routes were simply skipped.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cloudinit/net/netops/iproute2.py`
Content:
```
1 from typing import Optional
2
3 from cloudinit import subp
4 from cloudinit.net.netops import NetOps
5
6
7 class Iproute2(NetOps):
8 @staticmethod
9 def link_up(
10 interface: str, family: Optional[str] = None
11 ) -> subp.SubpResult:
12 family_args = []
13 if family:
14 family_args = ["-family", family]
15 return subp.subp(
16 ["ip", *family_args, "link", "set", "dev", interface, "up"]
17 )
18
19 @staticmethod
20 def link_down(
21 interface: str, family: Optional[str] = None
22 ) -> subp.SubpResult:
23 family_args = []
24 if family:
25 family_args = ["-family", family]
26 return subp.subp(
27 ["ip", *family_args, "link", "set", "dev", interface, "down"]
28 )
29
30 @staticmethod
31 def link_rename(current_name: str, new_name: str):
32 subp.subp(["ip", "link", "set", current_name, "name", new_name])
33
34 @staticmethod
35 def add_route(
36 interface: str,
37 route: str,
38 *,
39 gateway: Optional[str] = None,
40 source_address: Optional[str] = None,
41 ):
42 gateway_args = []
43 source_args = []
44 if gateway and gateway != "0.0.0.0":
45 gateway_args = ["via", gateway]
46 if source_address:
47 source_args = ["src", source_address]
48 subp.subp(
49 [
50 "ip",
51 "-4",
52 "route",
53 "add",
54 route,
55 *gateway_args,
56 "dev",
57 interface,
58 *source_args,
59 ]
60 )
61
62 @staticmethod
63 def append_route(interface: str, address: str, gateway: str):
64 gateway_args = []
65 if gateway and gateway != "0.0.0.0":
66 gateway_args = ["via", gateway]
67 subp.subp(
68 [
69 "ip",
70 "-4",
71 "route",
72 "append",
73 address,
74 *gateway_args,
75 "dev",
76 interface,
77 ]
78 )
79
80 @staticmethod
81 def del_route(
82 interface: str,
83 address: str,
84 *,
85 gateway: Optional[str] = None,
86 source_address: Optional[str] = None,
87 ):
88 gateway_args = []
89 source_args = []
90 if gateway and gateway != "0.0.0.0":
91 gateway_args = ["via", gateway]
92 if source_address:
93 source_args = ["src", source_address]
94 subp.subp(
95 [
96 "ip",
97 "-4",
98 "route",
99 "del",
100 address,
101 *gateway_args,
102 "dev",
103 interface,
104 *source_args,
105 ]
106 )
107
108 @staticmethod
109 def get_default_route() -> str:
110 return subp.subp(
111 ["ip", "route", "show", "0.0.0.0/0"],
112 ).stdout
113
114 @staticmethod
115 def add_addr(
116 interface: str, address: str, broadcast: Optional[str] = None
117 ):
118 broadcast_args = []
119 if broadcast:
120 broadcast_args = ["broadcast", broadcast]
121 subp.subp(
122 [
123 "ip",
124 "-family",
125 "inet",
126 "addr",
127 "add",
128 address,
129 *broadcast_args,
130 "dev",
131 interface,
132 ],
133 update_env={"LANG": "C"},
134 )
135
136 @staticmethod
137 def del_addr(interface: str, address: str):
138 subp.subp(
139 ["ip", "-family", "inet", "addr", "del", address, "dev", interface]
140 )
141
142 @staticmethod
143 def flush_addr(interface: str):
144 subp.subp(["ip", "flush", "dev", interface])
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cloudinit/net/netops/iproute2.py b/cloudinit/net/netops/iproute2.py
--- a/cloudinit/net/netops/iproute2.py
+++ b/cloudinit/net/netops/iproute2.py
@@ -50,7 +50,7 @@
"ip",
"-4",
"route",
- "add",
+ "replace",
route,
*gateway_args,
"dev",
|
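A hedged sketch of the iproute2 behaviour the patch relies on (interface and gateway are placeholders, and actually running this would modify the host's routing table):
```python
import subprocess

route = ["0.0.0.0/0", "via", "192.168.1.1", "dev", "eth0"]

# "ip route add" fails with "RTNETLINK answers: File exists" when a matching
# route is already installed, which cloud-init's subp() surfaces as
# ProcessExecutionError.  "ip route replace" is idempotent: it adds the route
# if missing and overwrites it otherwise, so ephemeral network setup no
# longer trips over a pre-existing default route.
subprocess.run(["ip", "-4", "route", "replace", *route], check=True)
```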
{"golden_diff": "diff --git a/cloudinit/net/netops/iproute2.py b/cloudinit/net/netops/iproute2.py\n--- a/cloudinit/net/netops/iproute2.py\n+++ b/cloudinit/net/netops/iproute2.py\n@@ -50,7 +50,7 @@\n \"ip\",\n \"-4\",\n \"route\",\n- \"add\",\n+ \"replace\",\n route,\n *gateway_args,\n \"dev\",\n", "issue": "cloud-init generates a traceback if a default route already exists during ephemeral network setup\nThis bug was originally filed in Launchpad as [LP: #1860164](https://bugs.launchpad.net/cloud-init/+bug/1860164)\n<details>\n<summary>Launchpad details</summary>\n<pre>\naffected_projects = []\nassignee = None\nassignee_name = None\ndate_closed = None\ndate_created = 2020-01-17T18:37:30.886100+00:00\ndate_fix_committed = None\ndate_fix_released = None\nid = 1860164\nimportance = medium\nis_complete = False\nlp_url = https://bugs.launchpad.net/cloud-init/+bug/1860164\nmilestone = None\nowner = rjschwei\nowner_name = Robert Schweikert\nprivate = False\nstatus = triaged\nsubmitter = rjschwei\nsubmitter_name = Robert Schweikert\ntags = []\nduplicates = []\n</pre>\n</details>\n\n_Launchpad user **Robert Schweikert(rjschwei)** wrote on 2020-01-17T18:37:30.886100+00:00_\n\nIf a route already exists when the ephemeral network exists cloud-init will generate the following traceback:\n\n2020-01-16 21:14:22,584 - util.py[DEBUG]: Getting data from <class 'cloudinit.sources.DataSourceOracle.DataSourceOracle'> failed\nTraceback (most recent call last):\n File \"/usr/lib/python2.7/site-packages/cloudinit/sources/__init__.py\", line 760, in find_source\n if s.update_metadata([EventType.BOOT_NEW_INSTANCE]):\n File \"/usr/lib/python2.7/site-packages/cloudinit/sources/__init__.py\", line 649, in update_metadata\n result = self.get_data()\n File \"/usr/lib/python2.7/site-packages/cloudinit/sources/__init__.py\", line 273, in get_data\n return_value = self._get_data()\n File \"/usr/lib/python2.7/site-packages/cloudinit/sources/DataSourceOracle.py\", line 195, in _get_data\n with dhcp.EphemeralDHCPv4(net.find_fallback_nic()):\n File \"/usr/lib/python2.7/site-packages/cloudinit/net/dhcp.py\", line 57, in __enter__\n return self.obtain_lease()\n File \"/usr/lib/python2.7/site-packages/cloudinit/net/dhcp.py\", line 109, in obtain_lease\n ephipv4.__enter__()\n File \"/usr/lib/python2.7/site-packages/cloudinit/net/__init__.py\", line 920, in __enter__\n self._bringup_static_routes()\n File \"/usr/lib/python2.7/site-packages/cloudinit/net/__init__.py\", line 974, in _bringup_static_routes\n ['dev', self.interface], capture=True)\n File \"/usr/lib/python2.7/site-packages/cloudinit/util.py\", line 2083, in subp\n cmd=args)\nProcessExecutionError: Unexpected error while running command.\n\nThis is a regression from 19.1 on SUSE where exiting routes were simply skipped.\n", "before_files": [{"content": "from typing import Optional\n\nfrom cloudinit import subp\nfrom cloudinit.net.netops import NetOps\n\n\nclass Iproute2(NetOps):\n @staticmethod\n def link_up(\n interface: str, family: Optional[str] = None\n ) -> subp.SubpResult:\n family_args = []\n if family:\n family_args = [\"-family\", family]\n return subp.subp(\n [\"ip\", *family_args, \"link\", \"set\", \"dev\", interface, \"up\"]\n )\n\n @staticmethod\n def link_down(\n interface: str, family: Optional[str] = None\n ) -> subp.SubpResult:\n family_args = []\n if family:\n family_args = [\"-family\", family]\n return subp.subp(\n [\"ip\", *family_args, \"link\", \"set\", \"dev\", interface, \"down\"]\n )\n\n @staticmethod\n def link_rename(current_name: str, 
new_name: str):\n subp.subp([\"ip\", \"link\", \"set\", current_name, \"name\", new_name])\n\n @staticmethod\n def add_route(\n interface: str,\n route: str,\n *,\n gateway: Optional[str] = None,\n source_address: Optional[str] = None,\n ):\n gateway_args = []\n source_args = []\n if gateway and gateway != \"0.0.0.0\":\n gateway_args = [\"via\", gateway]\n if source_address:\n source_args = [\"src\", source_address]\n subp.subp(\n [\n \"ip\",\n \"-4\",\n \"route\",\n \"add\",\n route,\n *gateway_args,\n \"dev\",\n interface,\n *source_args,\n ]\n )\n\n @staticmethod\n def append_route(interface: str, address: str, gateway: str):\n gateway_args = []\n if gateway and gateway != \"0.0.0.0\":\n gateway_args = [\"via\", gateway]\n subp.subp(\n [\n \"ip\",\n \"-4\",\n \"route\",\n \"append\",\n address,\n *gateway_args,\n \"dev\",\n interface,\n ]\n )\n\n @staticmethod\n def del_route(\n interface: str,\n address: str,\n *,\n gateway: Optional[str] = None,\n source_address: Optional[str] = None,\n ):\n gateway_args = []\n source_args = []\n if gateway and gateway != \"0.0.0.0\":\n gateway_args = [\"via\", gateway]\n if source_address:\n source_args = [\"src\", source_address]\n subp.subp(\n [\n \"ip\",\n \"-4\",\n \"route\",\n \"del\",\n address,\n *gateway_args,\n \"dev\",\n interface,\n *source_args,\n ]\n )\n\n @staticmethod\n def get_default_route() -> str:\n return subp.subp(\n [\"ip\", \"route\", \"show\", \"0.0.0.0/0\"],\n ).stdout\n\n @staticmethod\n def add_addr(\n interface: str, address: str, broadcast: Optional[str] = None\n ):\n broadcast_args = []\n if broadcast:\n broadcast_args = [\"broadcast\", broadcast]\n subp.subp(\n [\n \"ip\",\n \"-family\",\n \"inet\",\n \"addr\",\n \"add\",\n address,\n *broadcast_args,\n \"dev\",\n interface,\n ],\n update_env={\"LANG\": \"C\"},\n )\n\n @staticmethod\n def del_addr(interface: str, address: str):\n subp.subp(\n [\"ip\", \"-family\", \"inet\", \"addr\", \"del\", address, \"dev\", interface]\n )\n\n @staticmethod\n def flush_addr(interface: str):\n subp.subp([\"ip\", \"flush\", \"dev\", interface])\n", "path": "cloudinit/net/netops/iproute2.py"}], "after_files": [{"content": "from typing import Optional\n\nfrom cloudinit import subp\nfrom cloudinit.net.netops import NetOps\n\n\nclass Iproute2(NetOps):\n @staticmethod\n def link_up(\n interface: str, family: Optional[str] = None\n ) -> subp.SubpResult:\n family_args = []\n if family:\n family_args = [\"-family\", family]\n return subp.subp(\n [\"ip\", *family_args, \"link\", \"set\", \"dev\", interface, \"up\"]\n )\n\n @staticmethod\n def link_down(\n interface: str, family: Optional[str] = None\n ) -> subp.SubpResult:\n family_args = []\n if family:\n family_args = [\"-family\", family]\n return subp.subp(\n [\"ip\", *family_args, \"link\", \"set\", \"dev\", interface, \"down\"]\n )\n\n @staticmethod\n def link_rename(current_name: str, new_name: str):\n subp.subp([\"ip\", \"link\", \"set\", current_name, \"name\", new_name])\n\n @staticmethod\n def add_route(\n interface: str,\n route: str,\n *,\n gateway: Optional[str] = None,\n source_address: Optional[str] = None,\n ):\n gateway_args = []\n source_args = []\n if gateway and gateway != \"0.0.0.0\":\n gateway_args = [\"via\", gateway]\n if source_address:\n source_args = [\"src\", source_address]\n subp.subp(\n [\n \"ip\",\n \"-4\",\n \"route\",\n \"replace\",\n route,\n *gateway_args,\n \"dev\",\n interface,\n *source_args,\n ]\n )\n\n @staticmethod\n def append_route(interface: str, address: str, gateway: str):\n gateway_args = []\n if 
gateway and gateway != \"0.0.0.0\":\n gateway_args = [\"via\", gateway]\n subp.subp(\n [\n \"ip\",\n \"-4\",\n \"route\",\n \"append\",\n address,\n *gateway_args,\n \"dev\",\n interface,\n ]\n )\n\n @staticmethod\n def del_route(\n interface: str,\n address: str,\n *,\n gateway: Optional[str] = None,\n source_address: Optional[str] = None,\n ):\n gateway_args = []\n source_args = []\n if gateway and gateway != \"0.0.0.0\":\n gateway_args = [\"via\", gateway]\n if source_address:\n source_args = [\"src\", source_address]\n subp.subp(\n [\n \"ip\",\n \"-4\",\n \"route\",\n \"del\",\n address,\n *gateway_args,\n \"dev\",\n interface,\n *source_args,\n ]\n )\n\n @staticmethod\n def get_default_route() -> str:\n return subp.subp(\n [\"ip\", \"route\", \"show\", \"0.0.0.0/0\"],\n ).stdout\n\n @staticmethod\n def add_addr(\n interface: str, address: str, broadcast: Optional[str] = None\n ):\n broadcast_args = []\n if broadcast:\n broadcast_args = [\"broadcast\", broadcast]\n subp.subp(\n [\n \"ip\",\n \"-family\",\n \"inet\",\n \"addr\",\n \"add\",\n address,\n *broadcast_args,\n \"dev\",\n interface,\n ],\n update_env={\"LANG\": \"C\"},\n )\n\n @staticmethod\n def del_addr(interface: str, address: str):\n subp.subp(\n [\"ip\", \"-family\", \"inet\", \"addr\", \"del\", address, \"dev\", interface]\n )\n\n @staticmethod\n def flush_addr(interface: str):\n subp.subp([\"ip\", \"flush\", \"dev\", interface])\n", "path": "cloudinit/net/netops/iproute2.py"}]}
| 2,167 | 94 |
gh_patches_debug_9045
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-2967
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider att is broken
During the global build at 2021-06-02-14-42-40, spider **att** failed with **0 features** and **5433 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/att.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/att.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/att.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/att.py`
Content:
```
1 import scrapy
2 import json
3 import re
4 from locations.items import GeojsonPointItem
5 from locations.hours import OpeningHours
6
7
8 DAY_MAPPING = {
9 "MONDAY": "Mo",
10 "TUESDAY": "Tu",
11 "WEDNESDAY": "We",
12 "THURSDAY": "Th",
13 "FRIDAY": "Fr",
14 "SATURDAY": "Sa",
15 "SUNDAY": "Su"
16 }
17
18
19 class ATTScraper(scrapy.Spider):
20 name = "att"
21 item_attributes = { 'brand': "AT&T", 'brand_wikidata': "Q35476" }
22 allowed_domains = ['www.att.com']
23 start_urls = (
24 'https://www.att.com/stores/us',
25 )
26 download_delay = 0.2
27
28 def parse_hours(self, store_hours):
29 opening_hours = OpeningHours()
30 store_data = json.loads(store_hours)
31
32 for store_day in store_data:
33 if len(store_day["intervals"]) < 1:
34 continue
35 day = DAY_MAPPING[store_day["day"]]
36 open_time = str(store_day["intervals"][0]["start"])
37 if open_time == '0':
38 open_time = '0000'
39 close_time = str(store_day["intervals"][0]["end"])
40 if close_time == '0':
41 close_time = '2359'
42 opening_hours.add_range(day=day,
43 open_time=open_time,
44 close_time=close_time,
45 time_format='%H%M'
46 )
47
48 return opening_hours.as_opening_hours()
49
50 def parse(self, response):
51 urls = response.xpath('//a[@class="Directory-listLink"]/@href').extract()
52 is_store_list = response.xpath('//a[@class="Teaser-titleLink"]/@href').extract()
53
54 if not urls and is_store_list:
55 urls = response.xpath('//a[@class="Teaser-titleLink"]/@href').extract()
56 for url in urls:
57 if url.count('/') >= 2:
58 yield scrapy.Request(response.urljoin(url), callback=self.parse_store)
59 else:
60 yield scrapy.Request(response.urljoin(url))
61
62 def parse_store(self, response):
63 ref = re.search(r'.+/(.+?)/?(?:\.html|$)', response.url).group(1)
64
65 properties = {
66 'ref': ref,
67 'name': response.xpath('normalize-space(//span[@class="LocationName-brand"]/text())').extract_first(),
68 'addr_full': response.xpath('normalize-space(//meta[@itemprop="streetAddress"]/@content)').extract_first(),
69 'city': response.xpath('normalize-space(//meta[@itemprop="addressLocality"]/@content)').extract_first(),
70 'state': response.xpath('normalize-space(//abbr[@itemprop="addressRegion"]/text())').extract_first(),
71 'postcode': response.xpath('normalize-space(//span[@itemprop="postalCode"]/text())').extract_first(),
72 'country': response.xpath('normalize-space(//abbr[@itemprop="addressCountry"]/text())').extract_first(),
73 'phone': response.xpath('normalize-space(//span[@itemprop="telephone"]//text())').extract_first(),
74 'website': response.url,
75 'lat': response.xpath('normalize-space(//meta[@itemprop="latitude"]/@content)').extract_first(),
76 'lon': response.xpath('normalize-space(//meta[@itemprop="longitude"]/@content)').extract_first(),
77 }
78
79 hours = response.xpath('//span[@class="c-location-hours-today js-location-hours"]/@data-days').extract_first()
80 properties['opening_hours'] = self.parse_hours(hours)
81
82 yield GeojsonPointItem(**properties)
83
```
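The hours logic in the listing above consumes the JSON carried by the `data-days` attribute. A minimal standalone sketch of the shape `parse_hours` expects — the payload below is invented for illustration, not captured from att.com:

```python
import json

# Invented sample: one open day and one closed day (empty intervals list).
sample_data_days = (
    '[{"day": "MONDAY", "intervals": [{"start": 900, "end": 1800}]},'
    ' {"day": "SUNDAY", "intervals": []}]'
)

for store_day in json.loads(sample_data_days):
    if len(store_day["intervals"]) < 1:
        continue  # closed all day, skipped just as parse_hours does
    print(store_day["day"], store_day["intervals"][0]["start"], store_day["intervals"][0]["end"])
```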
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/att.py b/locations/spiders/att.py
--- a/locations/spiders/att.py
+++ b/locations/spiders/att.py
@@ -76,7 +76,7 @@
'lon': response.xpath('normalize-space(//meta[@itemprop="longitude"]/@content)').extract_first(),
}
- hours = response.xpath('//span[@class="c-location-hours-today js-location-hours"]/@data-days').extract_first()
+ hours = response.xpath('//span[@class="c-hours-today js-hours-today"]/@data-days').extract_first()
properties['opening_hours'] = self.parse_hours(hours)
yield GeojsonPointItem(**properties)
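The fix only swaps the CSS class names in the hours XPath. A quick sanity check of the new selector against a made-up HTML fragment (the real att.com markup is not reproduced in this record; the class names are taken from the patch above):

```python
from scrapy.selector import Selector

# Stand-in fragment, not scraped from the live site.
html = """<span class="c-hours-today js-hours-today" data-days='[{"day": "MONDAY", "intervals": []}]'></span>"""
sel = Selector(text=html)
print(sel.xpath('//span[@class="c-hours-today js-hours-today"]/@data-days').get())
```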
|
{"golden_diff": "diff --git a/locations/spiders/att.py b/locations/spiders/att.py\n--- a/locations/spiders/att.py\n+++ b/locations/spiders/att.py\n@@ -76,7 +76,7 @@\n 'lon': response.xpath('normalize-space(//meta[@itemprop=\"longitude\"]/@content)').extract_first(),\n }\n \n- hours = response.xpath('//span[@class=\"c-location-hours-today js-location-hours\"]/@data-days').extract_first()\n+ hours = response.xpath('//span[@class=\"c-hours-today js-hours-today\"]/@data-days').extract_first()\n properties['opening_hours'] = self.parse_hours(hours)\n \n yield GeojsonPointItem(**properties)\n", "issue": "Spider att is broken\nDuring the global build at 2021-06-02-14-42-40, spider **att** failed with **0 features** and **5433 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/att.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/att.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/att.geojson))\n", "before_files": [{"content": "import scrapy\nimport json\nimport re\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nDAY_MAPPING = {\n \"MONDAY\": \"Mo\",\n \"TUESDAY\": \"Tu\",\n \"WEDNESDAY\": \"We\",\n \"THURSDAY\": \"Th\",\n \"FRIDAY\": \"Fr\",\n \"SATURDAY\": \"Sa\",\n \"SUNDAY\": \"Su\"\n}\n\n\nclass ATTScraper(scrapy.Spider):\n name = \"att\"\n item_attributes = { 'brand': \"AT&T\", 'brand_wikidata': \"Q35476\" }\n allowed_domains = ['www.att.com']\n start_urls = (\n 'https://www.att.com/stores/us',\n )\n download_delay = 0.2\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n store_data = json.loads(store_hours)\n\n for store_day in store_data:\n if len(store_day[\"intervals\"]) < 1:\n continue\n day = DAY_MAPPING[store_day[\"day\"]]\n open_time = str(store_day[\"intervals\"][0][\"start\"])\n if open_time == '0':\n open_time = '0000'\n close_time = str(store_day[\"intervals\"][0][\"end\"])\n if close_time == '0':\n close_time = '2359'\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%H%M'\n )\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n urls = response.xpath('//a[@class=\"Directory-listLink\"]/@href').extract()\n is_store_list = response.xpath('//a[@class=\"Teaser-titleLink\"]/@href').extract()\n\n if not urls and is_store_list:\n urls = response.xpath('//a[@class=\"Teaser-titleLink\"]/@href').extract()\n for url in urls:\n if url.count('/') >= 2:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_store)\n else:\n yield scrapy.Request(response.urljoin(url))\n\n def parse_store(self, response):\n ref = re.search(r'.+/(.+?)/?(?:\\.html|$)', response.url).group(1)\n\n properties = {\n 'ref': ref,\n 'name': response.xpath('normalize-space(//span[@class=\"LocationName-brand\"]/text())').extract_first(),\n 'addr_full': response.xpath('normalize-space(//meta[@itemprop=\"streetAddress\"]/@content)').extract_first(),\n 'city': response.xpath('normalize-space(//meta[@itemprop=\"addressLocality\"]/@content)').extract_first(),\n 'state': response.xpath('normalize-space(//abbr[@itemprop=\"addressRegion\"]/text())').extract_first(),\n 'postcode': response.xpath('normalize-space(//span[@itemprop=\"postalCode\"]/text())').extract_first(),\n 'country': response.xpath('normalize-space(//abbr[@itemprop=\"addressCountry\"]/text())').extract_first(),\n 'phone': 
response.xpath('normalize-space(//span[@itemprop=\"telephone\"]//text())').extract_first(),\n 'website': response.url,\n 'lat': response.xpath('normalize-space(//meta[@itemprop=\"latitude\"]/@content)').extract_first(),\n 'lon': response.xpath('normalize-space(//meta[@itemprop=\"longitude\"]/@content)').extract_first(),\n }\n\n hours = response.xpath('//span[@class=\"c-location-hours-today js-location-hours\"]/@data-days').extract_first()\n properties['opening_hours'] = self.parse_hours(hours)\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/att.py"}], "after_files": [{"content": "import scrapy\nimport json\nimport re\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nDAY_MAPPING = {\n \"MONDAY\": \"Mo\",\n \"TUESDAY\": \"Tu\",\n \"WEDNESDAY\": \"We\",\n \"THURSDAY\": \"Th\",\n \"FRIDAY\": \"Fr\",\n \"SATURDAY\": \"Sa\",\n \"SUNDAY\": \"Su\"\n}\n\n\nclass ATTScraper(scrapy.Spider):\n name = \"att\"\n item_attributes = { 'brand': \"AT&T\", 'brand_wikidata': \"Q35476\" }\n allowed_domains = ['www.att.com']\n start_urls = (\n 'https://www.att.com/stores/us',\n )\n download_delay = 0.2\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n store_data = json.loads(store_hours)\n\n for store_day in store_data:\n if len(store_day[\"intervals\"]) < 1:\n continue\n day = DAY_MAPPING[store_day[\"day\"]]\n open_time = str(store_day[\"intervals\"][0][\"start\"])\n if open_time == '0':\n open_time = '0000'\n close_time = str(store_day[\"intervals\"][0][\"end\"])\n if close_time == '0':\n close_time = '2359'\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%H%M'\n )\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n urls = response.xpath('//a[@class=\"Directory-listLink\"]/@href').extract()\n is_store_list = response.xpath('//a[@class=\"Teaser-titleLink\"]/@href').extract()\n\n if not urls and is_store_list:\n urls = response.xpath('//a[@class=\"Teaser-titleLink\"]/@href').extract()\n for url in urls:\n if url.count('/') >= 2:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_store)\n else:\n yield scrapy.Request(response.urljoin(url))\n\n def parse_store(self, response):\n ref = re.search(r'.+/(.+?)/?(?:\\.html|$)', response.url).group(1)\n\n properties = {\n 'ref': ref,\n 'name': response.xpath('normalize-space(//span[@class=\"LocationName-brand\"]/text())').extract_first(),\n 'addr_full': response.xpath('normalize-space(//meta[@itemprop=\"streetAddress\"]/@content)').extract_first(),\n 'city': response.xpath('normalize-space(//meta[@itemprop=\"addressLocality\"]/@content)').extract_first(),\n 'state': response.xpath('normalize-space(//abbr[@itemprop=\"addressRegion\"]/text())').extract_first(),\n 'postcode': response.xpath('normalize-space(//span[@itemprop=\"postalCode\"]/text())').extract_first(),\n 'country': response.xpath('normalize-space(//abbr[@itemprop=\"addressCountry\"]/text())').extract_first(),\n 'phone': response.xpath('normalize-space(//span[@itemprop=\"telephone\"]//text())').extract_first(),\n 'website': response.url,\n 'lat': response.xpath('normalize-space(//meta[@itemprop=\"latitude\"]/@content)').extract_first(),\n 'lon': response.xpath('normalize-space(//meta[@itemprop=\"longitude\"]/@content)').extract_first(),\n }\n\n hours = response.xpath('//span[@class=\"c-hours-today js-hours-today\"]/@data-days').extract_first()\n properties['opening_hours'] = self.parse_hours(hours)\n\n yield 
GeojsonPointItem(**properties)\n", "path": "locations/spiders/att.py"}]}
| 1,391 | 155 |
gh_patches_debug_42289
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-3177
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Panama parser down due to website redesign
The [website](https://sitr.cnd.com.pa/m/pub/gen.html) used by PA.py has changed and the parser is failing.
At first glance it looks like they just put on a lick of paint, so changing a few CSS selectors should probably suffice to get it working again.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/PA.py`
Content:
```
1 #!/usr/bin/env python3
2 # coding=utf-8
3
4 # The arrow library is used to handle datetimes
5 import arrow
6 # The request library is used to fetch content through HTTP
7 import requests
8 # The BeautifulSoup library is used to parse HTML
9 from bs4 import BeautifulSoup
10
11 import logging
12
13
14 def fetch_production(zone_key='PA', session=None, target_datetime=None, logger: logging.Logger = logging.getLogger(__name__)):
15 """Requests the last known production mix (in MW) of a given country
16
17 Arguments:
18 zone_key (optional) -- used in case a parser is able to fetch multiple countries
19 session (optional) -- request session passed in order to re-use an existing session
20
21 Return:
22 A dictionary in the form:
23 {
24 'zoneKey': 'FR',
25 'datetime': '2017-01-01T00:00:00Z',
26 'production': {
27 'biomass': 0.0,
28 'coal': 0.0,
29 'gas': 0.0,
30 'hydro': 0.0,
31 'nuclear': null,
32 'oil': 0.0,
33 'solar': 0.0,
34 'wind': 0.0,
35 'geothermal': 0.0,
36 'unknown': 0.0
37 },
38 'storage': {
39 'hydro': -10.0,
40 },
41 'source': 'mysource.com'
42 }
43 """
44 if target_datetime:
45 raise NotImplementedError('This parser is not yet able to parse past dates')
46
47 #Fetch page and load into BeautifulSoup
48 r = session or requests.session()
49 url = 'http://sitr.cnd.com.pa/m/pub/gen.html'
50 response = r.get(url)
51 response.encoding = 'utf-8'
52 html_doc = response.text
53 soup = BeautifulSoup(html_doc, 'html.parser')
54
55 #Parse production from pie chart
56 productions = soup.find('table', {'class': 'sitr-pie-layout'}).find_all('span')
57 map_generation = {
58 'Hídrica': 'hydro',
59 'Eólica': 'wind',
60 'Solar': 'solar',
61 'Biogas': 'biomass',
62 'Térmica': 'unknown'
63 }
64 data = {
65 'zoneKey': 'PA',
66 'production': {
67 #Setting default values here so we can do += when parsing the thermal generation breakdown
68 'biomass': 0.0,
69 'coal': 0.0,
70 'gas': 0.0,
71 'hydro': 0.0,
72 'nuclear': 0.0,
73 'oil': 0.0,
74 'solar': 0.0,
75 'wind': 0.0,
76 'geothermal': 0.0,
77 'unknown': 0.0,
78 },
79 'storage': {},
80 'source': 'https://www.cnd.com.pa/',
81 }
82 for prod in productions:
83 prod_data = prod.string.split(' ')
84 production_mean = map_generation[prod_data[0]]
85 production_value = float(prod_data[1])
86 data['production'][production_mean] = production_value
87
88 #Known fossil plants: parse, subtract from "unknown", add to "coal"/"oil"/"gas"
89 thermal_production_breakdown = soup.find_all('table', {'class': 'sitr-table-gen'})[1]
90 #Make sure the table header is indeed "Térmicas (MW)" (in case the tables are re-arranged)
91 thermal_production_breakdown_table_header = thermal_production_breakdown.select('thead > tr > td > span')[0].string
92 assert ('Térmicas' in thermal_production_breakdown_table_header), (
93 "Exception when extracting thermal generation breakdown for {}: table header does not contain "
94 "'Térmicas' but is instead named {}".format(zone_key, thermal_production_breakdown_table_header)
95 )
96 thermal_production_units = thermal_production_breakdown.select('tbody tr td table.sitr-gen-group tr')
97 map_thermal_generation_unit_name_to_fuel_type = {
98 'ACP Miraflores 2': 'oil',#[7] Sheet "C-GE-1A-1 CapInstXEmp"
99 'ACP Miraflores 5': 'oil',#[7] Sheet "C-GE-1A-1 CapInstXEmp"
100 'ACP Miraflores 6': 'oil',#[7] Sheet "C-GE-1A-1 CapInstXEmp"
101 'ACP Miraflores 7': 'oil',#[7] Sheet "C-GE-1A-1 CapInstXEmp"
102 'ACP Miraflores 8': 'oil',#[7] Sheet "C-GE-1A-1 CapInstXEmp"
103 'ACP Miraflores 9': 'oil',#[7] Sheet "C-GE-1A-1 CapInstXEmp"
104 'ACP Miraflores 10': 'oil',#[7] Sheet "C-GE-1A-1 CapInstXEmp"
105 'BLM 2': 'coal',#[7] Sheet "C-GE-1A-2 CapInstXEmp"
106 'BLM 3': 'coal',#[7] Sheet "C-GE-1A-2 CapInstXEmp"
107 'BLM 4': 'coal',#[7] Sheet "C-GE-1A-2 CapInstXEmp"
108 'BLM 5': 'oil',#[7] Sheet "C-GE-1A-2 CapInstXEmp"
109 'BLM 6': 'oil',#[7] Sheet "C-GE-1A-2 CapInstXEmp"
110 'BLM 8': 'oil',#[7] Sheet "C-GE-1A-2 CapInstXEmp"
111 'BLM 9': 'oil',#[7] Sheet "C-GE-1A-2 CapInstXEmp" mentions no fuel type, and given all other units are accounted for this must be the heat recovery boiler for the 3 diesel-fired units mentioned in [2]
112 'Cativá 1': 'oil',#[1][2]
113 'Cativá 2': 'oil',#[1][2]
114 'Cativá 3': 'oil',#[1][2]
115 'Cativá 4': 'oil',#[1][2]
116 'Cativá 5': 'oil',#[1][2]
117 'Cativá 6': 'oil',#[1][2]
118 'Cativá 7': 'oil',#[1][2]
119 'Cativá 8': 'oil',#[1][2]
120 'Cativá 9': 'oil',#[1][2]
121 'Cativá 10': 'oil',#[1][2]
122 'Cobre Panamá 1': 'coal',#[3]
123 'Cobre Panamá 2': 'coal',#[3]
124 'Costa Norte 1': 'gas',#[4][5]
125 'Costa Norte 2': 'gas',#[4][5]
126 'Costa Norte 3': 'gas',#[4][5]
127 'Costa Norte 4': 'gas',#[4][5]
128 'Esperanza 1': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units
129 'Esperanza 2': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units
130 'Esperanza 3': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units
131 'Esperanza 4': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units
132 'Esperanza 5': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units
133 'Esperanza 6': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units
134 'Esperanza 7': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units
135 'Jinro': 'oil',#[6][7]
136 'Pacora 1': 'oil',#[6]
137 'Pacora 2': 'oil',#[6]
138 'Pacora 3': 'oil',#[6]
139 'PanAm 1': 'oil',#[6][7]
140 'PanAm 2': 'oil',#[6][7]
141 'PanAm 3': 'oil',#[6][7]
142 'PanAm 4': 'oil',#[6][7]
143 'PanAm 5': 'oil',#[6][7]
144 'PanAm 6': 'oil',#[6][7]
145 'PanAm 7': 'oil',#[6][7]
146 'PanAm 8': 'oil',#[6][7]
147 'PanAm 9': 'oil',#[6][7]
148 'Termocolón 1': 'oil',#[6] (spelled "Termo Colón")
149 'Termocolón 2': 'oil',#[6] (spelled "Termo Colón")
150 'Termocolón 3': 'oil',#[6] (spelled "Termo Colón")
151 'Tropitérmica 1': 'oil',#[6]:162[7] spelled "Tropitermica" in both
152 'Tropitérmica 2': 'oil',#[6]:162[7] spelled "Tropitermica" in both
153 'Tropitérmica 3': 'oil',#[6]:162[7] spelled "Tropitermica" in both
154 }
155 #Sources:
156 #1. https://www.celsia.com/Portals/0/contenidos-celsia/accionistas-e-inversionistas/perfil-corporativo-US/presentaciones-US/2014/presentacion-morgan-ingles-v2.pdf
157 #2. https://www.celsia.com/en/about-celsia/business-model/power-generation/thermoelectric-power-plants
158 #3. https://endcoal.org/tracker/
159 #4. http://aesmcac.com/aespanamades/en/colon/ "It reuses the heat from the exhaust gas from the gas turbines in order to obtain steam, to be later used by a steam turbine and to save fuel consumption in the production of electricity."
160 #5. https://panamcham.com/sites/default/files/el_inicio_del_futuro_del_gas_natural_en_panama.pdf "3 gas turbines and 1 steam (3X1 configuration)" "Technology: Combined Cycle" | This and the previous source taken together seems to imply that the steam turbine is responsible for the second cycle of the CCGT plant, giving confidence that output from all four units should indeed be tallied under "gas". Furthermore, as the plant also has a LNG import facility it is most unlikely the steam turbine would be burning a different fuel such as coal or oil.
161 #6. https://www.etesa.com.pa/documentos/Tomo_II__Plan_Indicativo_de_Generacin_2019__2033.pdf page 142
162 #7. http://168.77.210.79/energia/wp-content/uploads/sites/2/2020/08/2-CEE-1970-2019-GE-Generaci%C3%B3n-El%C3%A9ctrica.xls (via http://www.energia.gob.pa/mercado-energetico/?tag=84#documents-list)
163 #8. https://www.asep.gob.pa/wp-content/uploads/electricidad/resoluciones/anno_12528_elec.pdf
164 for thermal_production_unit in thermal_production_units:
165 unit_name_and_generation = thermal_production_unit.find_all('td')
166 unit_name = unit_name_and_generation[0].string
167 unit_generation = float(unit_name_and_generation[1].string)
168 if(unit_name in map_thermal_generation_unit_name_to_fuel_type):
169 if(unit_generation > 0):#Ignore self-consumption
170 unit_fuel_type = map_thermal_generation_unit_name_to_fuel_type[unit_name]
171 data['production'][unit_fuel_type] += unit_generation
172 data['production']['unknown'] -= unit_generation
173 else:
174 logger.warning(u'{} is not mapped to generation type'.format(unit_name), extra={'key': zone_key})
175
176 #Thermal total from the graph and the total one would get from summing output of all generators deviates a bit,
177 #presumably because they aren't updated at the exact same moment.
178 #Because negative production causes an error with ElectricityMap, we'll ignore small amounts of negative production
179 #TODO we might want to use the sum of the production of all thermal units instead of this workaround,
180 #because now we're still reporting small *postive* amounts of "ghost" thermal production
181 if data['production']['unknown'] < 0 and data['production']['unknown'] > -10:
182 logger.info(f"Ignoring small amount of negative thermal generation ({data['production']['unknown']}MW)", extra={"key": zone_key})
183 data['production']['unknown'] = 0
184
185 #Round remaining "unknown" output to 13 decimal places to get rid of floating point errors
186 data['production']['unknown'] = round(data['production']['unknown'],13)
187
188 # Parse the datetime and return a python datetime object
189 spanish_date = soup.find('div', {'class': 'sitr-update'}).find('span').string
190 date = arrow.get(spanish_date, 'DD-MMMM-YYYY H:mm:ss', locale="es", tzinfo="America/Panama")
191 data['datetime'] = date.datetime
192
193 return data
194
195
196 if __name__ == '__main__':
197 """Main method, never used by the Electricity Map backend, but handy for testing."""
198
199 print('fetch_production() ->')
200 print(fetch_production())
201
```
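A toy illustration of the reallocation step performed by the loop near the end of the listing: output of known fossil units is moved out of "unknown" and into their mapped fuel types. The figures below are invented, not real CND data:

```python
production = {"unknown": 250.0, "oil": 0.0, "coal": 0.0}
known_units = [("Cativá 1", "oil", 80.0), ("BLM 2", "coal", 120.0)]

for name, fuel, mw in known_units:
    if mw > 0:  # ignore self-consumption, as the parser does
        production[fuel] += mw
        production["unknown"] -= mw

print(production)  # {'unknown': 50.0, 'oil': 80.0, 'coal': 120.0}
```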
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsers/PA.py b/parsers/PA.py
--- a/parsers/PA.py
+++ b/parsers/PA.py
@@ -7,10 +7,20 @@
import requests
# The BeautifulSoup library is used to parse HTML
from bs4 import BeautifulSoup
+# Extracting some data using regex
+import re
+import json
import logging
+def extract_pie_chart_data(html):
+ """Extracts generation breakdown pie chart data from the source code of the page"""
+ dataSource = re.search(r"var localPie = (\[\{.+\}\]);", html).group(1)#Extract object with data
+ dataSource = re.sub(r"(name|value|color)", r'"\1"', dataSource) #Un-quoted keys ({key:"value"}) are valid JavaScript but not valid JSON (which requires {"key":"value"}). Will break if other keys than these three are introduced. Alternatively, use a JSON5 library (JSON5 allows un-quoted keys)
+ return json.loads(dataSource)
+
+
def fetch_production(zone_key='PA', session=None, target_datetime=None, logger: logging.Logger = logging.getLogger(__name__)):
"""Requests the last known production mix (in MW) of a given country
@@ -53,7 +63,7 @@
soup = BeautifulSoup(html_doc, 'html.parser')
#Parse production from pie chart
- productions = soup.find('table', {'class': 'sitr-pie-layout'}).find_all('span')
+ productions = extract_pie_chart_data(html_doc) #[{name:"Hídrica 1342.54 (80.14%)",value:1342.54,color:"#99ccee"}, ...]
map_generation = {
'Hídrica': 'hydro',
'Eólica': 'wind',
@@ -79,16 +89,16 @@
'storage': {},
'source': 'https://www.cnd.com.pa/',
}
- for prod in productions:
- prod_data = prod.string.split(' ')
- production_mean = map_generation[prod_data[0]]
- production_value = float(prod_data[1])
- data['production'][production_mean] = production_value
+ for prod in productions: #{name:"Hídrica 1342.54 (80.14%)", ...}
+ prod_data = prod['name'].split(' ') #"Hídrica 1342.54 (80.14%)"
+ production_type = map_generation[prod_data[0]] #Hídrica
+ production_value = float(prod_data[1]) #1342.54
+ data['production'][production_type] = production_value
#Known fossil plants: parse, subtract from "unknown", add to "coal"/"oil"/"gas"
thermal_production_breakdown = soup.find_all('table', {'class': 'sitr-table-gen'})[1]
#Make sure the table header is indeed "Térmicas (MW)" (in case the tables are re-arranged)
- thermal_production_breakdown_table_header = thermal_production_breakdown.select('thead > tr > td > span')[0].string
+ thermal_production_breakdown_table_header = thermal_production_breakdown.parent.parent.parent.select('> .tile-title')[0].string
assert ('Térmicas' in thermal_production_breakdown_table_header), (
"Exception when extracting thermal generation breakdown for {}: table header does not contain "
"'Térmicas' but is instead named {}".format(zone_key, thermal_production_breakdown_table_header)
@@ -186,7 +196,7 @@
data['production']['unknown'] = round(data['production']['unknown'],13)
# Parse the datetime and return a python datetime object
- spanish_date = soup.find('div', {'class': 'sitr-update'}).find('span').string
+ spanish_date = soup.find('h3', {'class': 'sitr-update'}).string
date = arrow.get(spanish_date, 'DD-MMMM-YYYY H:mm:ss', locale="es", tzinfo="America/Panama")
data['datetime'] = date.datetime
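The new `extract_pie_chart_data` helper pulls the generation breakdown out of a JavaScript assignment instead of rendered HTML. A standalone demonstration of the same regex-plus-`json.loads` technique on a stand-in page fragment (the snippet mirrors the shape quoted in the patch comment, not the live site):

```python
import json
import re

html = 'var localPie = [{name:"Hídrica 1342.54 (80.14%)",value:1342.54,color:"#99ccee"}];'

raw = re.search(r"var localPie = (\[\{.+\}\]);", html).group(1)
raw = re.sub(r"(name|value|color)", r'"\1"', raw)  # quote the bare keys so json.loads accepts them
productions = json.loads(raw)
print(productions[0]["name"].split(" ")[0], productions[0]["value"])  # Hídrica 1342.54
```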
|
{"golden_diff": "diff --git a/parsers/PA.py b/parsers/PA.py\n--- a/parsers/PA.py\n+++ b/parsers/PA.py\n@@ -7,10 +7,20 @@\n import requests\n # The BeautifulSoup library is used to parse HTML\n from bs4 import BeautifulSoup\n+# Extracting some data using regex\n+import re\n+import json\n \n import logging\n \n \n+def extract_pie_chart_data(html):\n+ \"\"\"Extracts generation breakdown pie chart data from the source code of the page\"\"\"\n+ dataSource = re.search(r\"var localPie = (\\[\\{.+\\}\\]);\", html).group(1)#Extract object with data\n+ dataSource = re.sub(r\"(name|value|color)\", r'\"\\1\"', dataSource) #Un-quoted keys ({key:\"value\"}) are valid JavaScript but not valid JSON (which requires {\"key\":\"value\"}). Will break if other keys than these three are introduced. Alternatively, use a JSON5 library (JSON5 allows un-quoted keys)\n+ return json.loads(dataSource)\n+\n+\n def fetch_production(zone_key='PA', session=None, target_datetime=None, logger: logging.Logger = logging.getLogger(__name__)):\n \"\"\"Requests the last known production mix (in MW) of a given country\n \n@@ -53,7 +63,7 @@\n soup = BeautifulSoup(html_doc, 'html.parser')\n \n #Parse production from pie chart\n- productions = soup.find('table', {'class': 'sitr-pie-layout'}).find_all('span')\n+ productions = extract_pie_chart_data(html_doc) #[{name:\"H\u00eddrica 1342.54 (80.14%)\",value:1342.54,color:\"#99ccee\"}, ...]\n map_generation = {\n 'H\u00eddrica': 'hydro',\n 'E\u00f3lica': 'wind',\n@@ -79,16 +89,16 @@\n 'storage': {},\n 'source': 'https://www.cnd.com.pa/',\n }\n- for prod in productions:\n- prod_data = prod.string.split(' ')\n- production_mean = map_generation[prod_data[0]]\n- production_value = float(prod_data[1])\n- data['production'][production_mean] = production_value\n+ for prod in productions: #{name:\"H\u00eddrica 1342.54 (80.14%)\", ...}\n+ prod_data = prod['name'].split(' ') #\"H\u00eddrica 1342.54 (80.14%)\"\n+ production_type = map_generation[prod_data[0]] #H\u00eddrica\n+ production_value = float(prod_data[1]) #1342.54\n+ data['production'][production_type] = production_value\n \n #Known fossil plants: parse, subtract from \"unknown\", add to \"coal\"/\"oil\"/\"gas\"\n thermal_production_breakdown = soup.find_all('table', {'class': 'sitr-table-gen'})[1]\n #Make sure the table header is indeed \"T\u00e9rmicas (MW)\" (in case the tables are re-arranged)\n- thermal_production_breakdown_table_header = thermal_production_breakdown.select('thead > tr > td > span')[0].string\n+ thermal_production_breakdown_table_header = thermal_production_breakdown.parent.parent.parent.select('> .tile-title')[0].string\n assert ('T\u00e9rmicas' in thermal_production_breakdown_table_header), (\n \"Exception when extracting thermal generation breakdown for {}: table header does not contain \"\n \"'T\u00e9rmicas' but is instead named {}\".format(zone_key, thermal_production_breakdown_table_header)\n@@ -186,7 +196,7 @@\n data['production']['unknown'] = round(data['production']['unknown'],13)\n \n # Parse the datetime and return a python datetime object\n- spanish_date = soup.find('div', {'class': 'sitr-update'}).find('span').string\n+ spanish_date = soup.find('h3', {'class': 'sitr-update'}).string\n date = arrow.get(spanish_date, 'DD-MMMM-YYYY H:mm:ss', locale=\"es\", tzinfo=\"America/Panama\")\n data['datetime'] = date.datetime\n", "issue": "Panama parser down due to website redesign\nThe [website](https://sitr.cnd.com.pa/m/pub/gen.html) used by PA.py has changed and the parser is failing.\r\n\r\nAt first glance it 
looks like they just put on a lick of paint, so changing a few CSS selectors should probably suffice to get it working again.\n", "before_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\n\n# The arrow library is used to handle datetimes\nimport arrow\n# The request library is used to fetch content through HTTP\nimport requests\n# The BeautifulSoup library is used to parse HTML\nfrom bs4 import BeautifulSoup\n\nimport logging\n\n\ndef fetch_production(zone_key='PA', session=None, target_datetime=None, logger: logging.Logger = logging.getLogger(__name__)):\n \"\"\"Requests the last known production mix (in MW) of a given country\n\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n #Fetch page and load into BeautifulSoup\n r = session or requests.session()\n url = 'http://sitr.cnd.com.pa/m/pub/gen.html'\n response = r.get(url)\n response.encoding = 'utf-8'\n html_doc = response.text\n soup = BeautifulSoup(html_doc, 'html.parser')\n\n #Parse production from pie chart\n productions = soup.find('table', {'class': 'sitr-pie-layout'}).find_all('span')\n map_generation = {\n 'H\u00eddrica': 'hydro',\n 'E\u00f3lica': 'wind',\n 'Solar': 'solar',\n 'Biogas': 'biomass',\n 'T\u00e9rmica': 'unknown'\n }\n data = {\n 'zoneKey': 'PA',\n 'production': {\n #Setting default values here so we can do += when parsing the thermal generation breakdown\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': 0.0,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0,\n },\n 'storage': {},\n 'source': 'https://www.cnd.com.pa/',\n }\n for prod in productions:\n prod_data = prod.string.split(' ')\n production_mean = map_generation[prod_data[0]]\n production_value = float(prod_data[1])\n data['production'][production_mean] = production_value\n\n #Known fossil plants: parse, subtract from \"unknown\", add to \"coal\"/\"oil\"/\"gas\"\n thermal_production_breakdown = soup.find_all('table', {'class': 'sitr-table-gen'})[1]\n #Make sure the table header is indeed \"T\u00e9rmicas (MW)\" (in case the tables are re-arranged)\n thermal_production_breakdown_table_header = thermal_production_breakdown.select('thead > tr > td > span')[0].string\n assert ('T\u00e9rmicas' in thermal_production_breakdown_table_header), (\n \"Exception when extracting thermal generation breakdown for {}: table header does not contain \"\n \"'T\u00e9rmicas' but is instead named {}\".format(zone_key, thermal_production_breakdown_table_header)\n )\n thermal_production_units = thermal_production_breakdown.select('tbody tr td table.sitr-gen-group tr')\n map_thermal_generation_unit_name_to_fuel_type = {\n 'ACP Miraflores 2': 'oil',#[7] Sheet \"C-GE-1A-1 CapInstXEmp\"\n 'ACP Miraflores 5': 'oil',#[7] Sheet \"C-GE-1A-1 CapInstXEmp\"\n 'ACP Miraflores 6': 'oil',#[7] Sheet \"C-GE-1A-1 CapInstXEmp\"\n 'ACP Miraflores 7': 'oil',#[7] Sheet \"C-GE-1A-1 CapInstXEmp\"\n 'ACP Miraflores 8': 'oil',#[7] Sheet 
\"C-GE-1A-1 CapInstXEmp\"\n 'ACP Miraflores 9': 'oil',#[7] Sheet \"C-GE-1A-1 CapInstXEmp\"\n 'ACP Miraflores 10': 'oil',#[7] Sheet \"C-GE-1A-1 CapInstXEmp\"\n 'BLM 2': 'coal',#[7] Sheet \"C-GE-1A-2 CapInstXEmp\"\n 'BLM 3': 'coal',#[7] Sheet \"C-GE-1A-2 CapInstXEmp\"\n 'BLM 4': 'coal',#[7] Sheet \"C-GE-1A-2 CapInstXEmp\"\n 'BLM 5': 'oil',#[7] Sheet \"C-GE-1A-2 CapInstXEmp\"\n 'BLM 6': 'oil',#[7] Sheet \"C-GE-1A-2 CapInstXEmp\"\n 'BLM 8': 'oil',#[7] Sheet \"C-GE-1A-2 CapInstXEmp\"\n 'BLM 9': 'oil',#[7] Sheet \"C-GE-1A-2 CapInstXEmp\" mentions no fuel type, and given all other units are accounted for this must be the heat recovery boiler for the 3 diesel-fired units mentioned in [2]\n 'Cativ\u00e1 1': 'oil',#[1][2]\n 'Cativ\u00e1 2': 'oil',#[1][2]\n 'Cativ\u00e1 3': 'oil',#[1][2]\n 'Cativ\u00e1 4': 'oil',#[1][2]\n 'Cativ\u00e1 5': 'oil',#[1][2]\n 'Cativ\u00e1 6': 'oil',#[1][2]\n 'Cativ\u00e1 7': 'oil',#[1][2]\n 'Cativ\u00e1 8': 'oil',#[1][2]\n 'Cativ\u00e1 9': 'oil',#[1][2]\n 'Cativ\u00e1 10': 'oil',#[1][2]\n 'Cobre Panam\u00e1 1': 'coal',#[3]\n 'Cobre Panam\u00e1 2': 'coal',#[3]\n 'Costa Norte 1': 'gas',#[4][5]\n 'Costa Norte 2': 'gas',#[4][5]\n 'Costa Norte 3': 'gas',#[4][5]\n 'Costa Norte 4': 'gas',#[4][5]\n 'Esperanza 1': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units\n 'Esperanza 2': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units\n 'Esperanza 3': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units\n 'Esperanza 4': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units\n 'Esperanza 5': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units\n 'Esperanza 6': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units\n 'Esperanza 7': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units\n 'Jinro': 'oil',#[6][7]\n 'Pacora 1': 'oil',#[6]\n 'Pacora 2': 'oil',#[6]\n 'Pacora 3': 'oil',#[6]\n 'PanAm 1': 'oil',#[6][7]\n 'PanAm 2': 'oil',#[6][7]\n 'PanAm 3': 'oil',#[6][7]\n 'PanAm 4': 'oil',#[6][7]\n 'PanAm 5': 'oil',#[6][7]\n 'PanAm 6': 'oil',#[6][7]\n 'PanAm 7': 'oil',#[6][7]\n 'PanAm 8': 'oil',#[6][7]\n 'PanAm 9': 'oil',#[6][7]\n 'Termocol\u00f3n 1': 'oil',#[6] (spelled \"Termo Col\u00f3n\")\n 'Termocol\u00f3n 2': 'oil',#[6] (spelled \"Termo Col\u00f3n\")\n 'Termocol\u00f3n 3': 'oil',#[6] (spelled \"Termo Col\u00f3n\")\n 'Tropit\u00e9rmica 1': 'oil',#[6]:162[7] spelled \"Tropitermica\" in both\n 'Tropit\u00e9rmica 2': 'oil',#[6]:162[7] spelled \"Tropitermica\" in both\n 'Tropit\u00e9rmica 3': 'oil',#[6]:162[7] spelled \"Tropitermica\" in both\n }\n #Sources:\n #1. https://www.celsia.com/Portals/0/contenidos-celsia/accionistas-e-inversionistas/perfil-corporativo-US/presentaciones-US/2014/presentacion-morgan-ingles-v2.pdf\n #2. https://www.celsia.com/en/about-celsia/business-model/power-generation/thermoelectric-power-plants\n #3. https://endcoal.org/tracker/\n #4. http://aesmcac.com/aespanamades/en/colon/ \"It reuses the heat from the exhaust gas from the gas turbines in order to obtain steam, to be later used by a steam turbine and to save fuel consumption in the production of electricity.\"\n #5. 
https://panamcham.com/sites/default/files/el_inicio_del_futuro_del_gas_natural_en_panama.pdf \"3 gas turbines and 1 steam (3X1 configuration)\" \"Technology: Combined Cycle\" | This and the previous source taken together seems to imply that the steam turbine is responsible for the second cycle of the CCGT plant, giving confidence that output from all four units should indeed be tallied under \"gas\". Furthermore, as the plant also has a LNG import facility it is most unlikely the steam turbine would be burning a different fuel such as coal or oil.\n #6. https://www.etesa.com.pa/documentos/Tomo_II__Plan_Indicativo_de_Generacin_2019__2033.pdf page 142\n #7. http://168.77.210.79/energia/wp-content/uploads/sites/2/2020/08/2-CEE-1970-2019-GE-Generaci%C3%B3n-El%C3%A9ctrica.xls (via http://www.energia.gob.pa/mercado-energetico/?tag=84#documents-list)\n #8. https://www.asep.gob.pa/wp-content/uploads/electricidad/resoluciones/anno_12528_elec.pdf\n for thermal_production_unit in thermal_production_units:\n unit_name_and_generation = thermal_production_unit.find_all('td')\n unit_name = unit_name_and_generation[0].string\n unit_generation = float(unit_name_and_generation[1].string)\n if(unit_name in map_thermal_generation_unit_name_to_fuel_type):\n if(unit_generation > 0):#Ignore self-consumption\n unit_fuel_type = map_thermal_generation_unit_name_to_fuel_type[unit_name]\n data['production'][unit_fuel_type] += unit_generation\n data['production']['unknown'] -= unit_generation\n else:\n logger.warning(u'{} is not mapped to generation type'.format(unit_name), extra={'key': zone_key})\n\n #Thermal total from the graph and the total one would get from summing output of all generators deviates a bit,\n #presumably because they aren't updated at the exact same moment.\n #Because negative production causes an error with ElectricityMap, we'll ignore small amounts of negative production\n #TODO we might want to use the sum of the production of all thermal units instead of this workaround,\n #because now we're still reporting small *postive* amounts of \"ghost\" thermal production\n if data['production']['unknown'] < 0 and data['production']['unknown'] > -10:\n logger.info(f\"Ignoring small amount of negative thermal generation ({data['production']['unknown']}MW)\", extra={\"key\": zone_key})\n data['production']['unknown'] = 0\n\n #Round remaining \"unknown\" output to 13 decimal places to get rid of floating point errors\n data['production']['unknown'] = round(data['production']['unknown'],13)\n\n # Parse the datetime and return a python datetime object\n spanish_date = soup.find('div', {'class': 'sitr-update'}).find('span').string\n date = arrow.get(spanish_date, 'DD-MMMM-YYYY H:mm:ss', locale=\"es\", tzinfo=\"America/Panama\")\n data['datetime'] = date.datetime\n\n return data\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n", "path": "parsers/PA.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\n\n# The arrow library is used to handle datetimes\nimport arrow\n# The request library is used to fetch content through HTTP\nimport requests\n# The BeautifulSoup library is used to parse HTML\nfrom bs4 import BeautifulSoup\n# Extracting some data using regex\nimport re\nimport json\n\nimport logging\n\n\ndef extract_pie_chart_data(html):\n \"\"\"Extracts generation breakdown pie chart data from the source code of the page\"\"\"\n dataSource = 
re.search(r\"var localPie = (\\[\\{.+\\}\\]);\", html).group(1)#Extract object with data\n dataSource = re.sub(r\"(name|value|color)\", r'\"\\1\"', dataSource) #Un-quoted keys ({key:\"value\"}) are valid JavaScript but not valid JSON (which requires {\"key\":\"value\"}). Will break if other keys than these three are introduced. Alternatively, use a JSON5 library (JSON5 allows un-quoted keys)\n return json.loads(dataSource)\n\n\ndef fetch_production(zone_key='PA', session=None, target_datetime=None, logger: logging.Logger = logging.getLogger(__name__)):\n \"\"\"Requests the last known production mix (in MW) of a given country\n\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n #Fetch page and load into BeautifulSoup\n r = session or requests.session()\n url = 'http://sitr.cnd.com.pa/m/pub/gen.html'\n response = r.get(url)\n response.encoding = 'utf-8'\n html_doc = response.text\n soup = BeautifulSoup(html_doc, 'html.parser')\n\n #Parse production from pie chart\n productions = extract_pie_chart_data(html_doc) #[{name:\"H\u00eddrica 1342.54 (80.14%)\",value:1342.54,color:\"#99ccee\"}, ...]\n map_generation = {\n 'H\u00eddrica': 'hydro',\n 'E\u00f3lica': 'wind',\n 'Solar': 'solar',\n 'Biogas': 'biomass',\n 'T\u00e9rmica': 'unknown'\n }\n data = {\n 'zoneKey': 'PA',\n 'production': {\n #Setting default values here so we can do += when parsing the thermal generation breakdown\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': 0.0,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0,\n },\n 'storage': {},\n 'source': 'https://www.cnd.com.pa/',\n }\n for prod in productions: #{name:\"H\u00eddrica 1342.54 (80.14%)\", ...}\n prod_data = prod['name'].split(' ') #\"H\u00eddrica 1342.54 (80.14%)\"\n production_type = map_generation[prod_data[0]] #H\u00eddrica\n production_value = float(prod_data[1]) #1342.54\n data['production'][production_type] = production_value\n\n #Known fossil plants: parse, subtract from \"unknown\", add to \"coal\"/\"oil\"/\"gas\"\n thermal_production_breakdown = soup.find_all('table', {'class': 'sitr-table-gen'})[1]\n #Make sure the table header is indeed \"T\u00e9rmicas (MW)\" (in case the tables are re-arranged)\n thermal_production_breakdown_table_header = thermal_production_breakdown.parent.parent.parent.select('> .tile-title')[0].string\n assert ('T\u00e9rmicas' in thermal_production_breakdown_table_header), (\n \"Exception when extracting thermal generation breakdown for {}: table header does not contain \"\n \"'T\u00e9rmicas' but is instead named {}\".format(zone_key, thermal_production_breakdown_table_header)\n )\n thermal_production_units = thermal_production_breakdown.select('tbody tr td table.sitr-gen-group tr')\n map_thermal_generation_unit_name_to_fuel_type = {\n 'ACP Miraflores 2': 'oil',#[7] Sheet \"C-GE-1A-1 CapInstXEmp\"\n 'ACP Miraflores 5': 'oil',#[7] Sheet \"C-GE-1A-1 CapInstXEmp\"\n 'ACP Miraflores 6': 
'oil',#[7] Sheet \"C-GE-1A-1 CapInstXEmp\"\n 'ACP Miraflores 7': 'oil',#[7] Sheet \"C-GE-1A-1 CapInstXEmp\"\n 'ACP Miraflores 8': 'oil',#[7] Sheet \"C-GE-1A-1 CapInstXEmp\"\n 'ACP Miraflores 9': 'oil',#[7] Sheet \"C-GE-1A-1 CapInstXEmp\"\n 'ACP Miraflores 10': 'oil',#[7] Sheet \"C-GE-1A-1 CapInstXEmp\"\n 'BLM 2': 'coal',#[7] Sheet \"C-GE-1A-2 CapInstXEmp\"\n 'BLM 3': 'coal',#[7] Sheet \"C-GE-1A-2 CapInstXEmp\"\n 'BLM 4': 'coal',#[7] Sheet \"C-GE-1A-2 CapInstXEmp\"\n 'BLM 5': 'oil',#[7] Sheet \"C-GE-1A-2 CapInstXEmp\"\n 'BLM 6': 'oil',#[7] Sheet \"C-GE-1A-2 CapInstXEmp\"\n 'BLM 8': 'oil',#[7] Sheet \"C-GE-1A-2 CapInstXEmp\"\n 'BLM 9': 'oil',#[7] Sheet \"C-GE-1A-2 CapInstXEmp\" mentions no fuel type, and given all other units are accounted for this must be the heat recovery boiler for the 3 diesel-fired units mentioned in [2]\n 'Cativ\u00e1 1': 'oil',#[1][2]\n 'Cativ\u00e1 2': 'oil',#[1][2]\n 'Cativ\u00e1 3': 'oil',#[1][2]\n 'Cativ\u00e1 4': 'oil',#[1][2]\n 'Cativ\u00e1 5': 'oil',#[1][2]\n 'Cativ\u00e1 6': 'oil',#[1][2]\n 'Cativ\u00e1 7': 'oil',#[1][2]\n 'Cativ\u00e1 8': 'oil',#[1][2]\n 'Cativ\u00e1 9': 'oil',#[1][2]\n 'Cativ\u00e1 10': 'oil',#[1][2]\n 'Cobre Panam\u00e1 1': 'coal',#[3]\n 'Cobre Panam\u00e1 2': 'coal',#[3]\n 'Costa Norte 1': 'gas',#[4][5]\n 'Costa Norte 2': 'gas',#[4][5]\n 'Costa Norte 3': 'gas',#[4][5]\n 'Costa Norte 4': 'gas',#[4][5]\n 'Esperanza 1': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units\n 'Esperanza 2': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units\n 'Esperanza 3': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units\n 'Esperanza 4': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units\n 'Esperanza 5': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units\n 'Esperanza 6': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units\n 'Esperanza 7': 'oil',#[7] has a single 92MW bunker fuel power plant, but [8] shows this is actually a power barge with 7 units\n 'Jinro': 'oil',#[6][7]\n 'Pacora 1': 'oil',#[6]\n 'Pacora 2': 'oil',#[6]\n 'Pacora 3': 'oil',#[6]\n 'PanAm 1': 'oil',#[6][7]\n 'PanAm 2': 'oil',#[6][7]\n 'PanAm 3': 'oil',#[6][7]\n 'PanAm 4': 'oil',#[6][7]\n 'PanAm 5': 'oil',#[6][7]\n 'PanAm 6': 'oil',#[6][7]\n 'PanAm 7': 'oil',#[6][7]\n 'PanAm 8': 'oil',#[6][7]\n 'PanAm 9': 'oil',#[6][7]\n 'Termocol\u00f3n 1': 'oil',#[6] (spelled \"Termo Col\u00f3n\")\n 'Termocol\u00f3n 2': 'oil',#[6] (spelled \"Termo Col\u00f3n\")\n 'Termocol\u00f3n 3': 'oil',#[6] (spelled \"Termo Col\u00f3n\")\n 'Tropit\u00e9rmica 1': 'oil',#[6]:162[7] spelled \"Tropitermica\" in both\n 'Tropit\u00e9rmica 2': 'oil',#[6]:162[7] spelled \"Tropitermica\" in both\n 'Tropit\u00e9rmica 3': 'oil',#[6]:162[7] spelled \"Tropitermica\" in both\n }\n #Sources:\n #1. https://www.celsia.com/Portals/0/contenidos-celsia/accionistas-e-inversionistas/perfil-corporativo-US/presentaciones-US/2014/presentacion-morgan-ingles-v2.pdf\n #2. https://www.celsia.com/en/about-celsia/business-model/power-generation/thermoelectric-power-plants\n #3. https://endcoal.org/tracker/\n #4. 
http://aesmcac.com/aespanamades/en/colon/ \"It reuses the heat from the exhaust gas from the gas turbines in order to obtain steam, to be later used by a steam turbine and to save fuel consumption in the production of electricity.\"\n #5. https://panamcham.com/sites/default/files/el_inicio_del_futuro_del_gas_natural_en_panama.pdf \"3 gas turbines and 1 steam (3X1 configuration)\" \"Technology: Combined Cycle\" | This and the previous source taken together seems to imply that the steam turbine is responsible for the second cycle of the CCGT plant, giving confidence that output from all four units should indeed be tallied under \"gas\". Furthermore, as the plant also has a LNG import facility it is most unlikely the steam turbine would be burning a different fuel such as coal or oil.\n #6. https://www.etesa.com.pa/documentos/Tomo_II__Plan_Indicativo_de_Generacin_2019__2033.pdf page 142\n #7. http://168.77.210.79/energia/wp-content/uploads/sites/2/2020/08/2-CEE-1970-2019-GE-Generaci%C3%B3n-El%C3%A9ctrica.xls (via http://www.energia.gob.pa/mercado-energetico/?tag=84#documents-list)\n #8. https://www.asep.gob.pa/wp-content/uploads/electricidad/resoluciones/anno_12528_elec.pdf\n for thermal_production_unit in thermal_production_units:\n unit_name_and_generation = thermal_production_unit.find_all('td')\n unit_name = unit_name_and_generation[0].string\n unit_generation = float(unit_name_and_generation[1].string)\n if(unit_name in map_thermal_generation_unit_name_to_fuel_type):\n if(unit_generation > 0):#Ignore self-consumption\n unit_fuel_type = map_thermal_generation_unit_name_to_fuel_type[unit_name]\n data['production'][unit_fuel_type] += unit_generation\n data['production']['unknown'] -= unit_generation\n else:\n logger.warning(u'{} is not mapped to generation type'.format(unit_name), extra={'key': zone_key})\n\n #Thermal total from the graph and the total one would get from summing output of all generators deviates a bit,\n #presumably because they aren't updated at the exact same moment.\n #Because negative production causes an error with ElectricityMap, we'll ignore small amounts of negative production\n #TODO we might want to use the sum of the production of all thermal units instead of this workaround,\n #because now we're still reporting small *postive* amounts of \"ghost\" thermal production\n if data['production']['unknown'] < 0 and data['production']['unknown'] > -10:\n logger.info(f\"Ignoring small amount of negative thermal generation ({data['production']['unknown']}MW)\", extra={\"key\": zone_key})\n data['production']['unknown'] = 0\n\n #Round remaining \"unknown\" output to 13 decimal places to get rid of floating point errors\n data['production']['unknown'] = round(data['production']['unknown'],13)\n\n # Parse the datetime and return a python datetime object\n spanish_date = soup.find('h3', {'class': 'sitr-update'}).string\n date = arrow.get(spanish_date, 'DD-MMMM-YYYY H:mm:ss', locale=\"es\", tzinfo=\"America/Panama\")\n data['datetime'] = date.datetime\n\n return data\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n", "path": "parsers/PA.py"}]}
| 4,025 | 915 |
gh_patches_debug_41853
|
rasdani/github-patches
|
git_diff
|
ansible-collections__community.general-4935
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
community.general.lxd connection not working with molecule
### Summary
When I try to run `molecule create` with the [lxd driver](https://github.com/ansible-community/molecule-lxd), it creates the lxc container correctly, but then gives a warning and fails to run a command on the container.
```
[WARNING]: The "ansible_collections.community.general.plugins.connection.lxd" connection plugin has an improperly configured remote target value, forcing "inventory_hostname" templated value instead of the string
```
After some debugging, I found that the `remote_addr` value was being set to the literal string 'inventory_hostname' instead of the value of the current host's `inventory_hostname`. I found another connection plugin that had [fixed a similar issue](https://github.com/ansible/ansible/pull/77894).
Applying this patch to the `plugins/connection/lxd.py` file fixes the problem.
[fix_lxd_inventory_hostname.patch.txt](https://github.com/ansible-collections/community.general/files/8960273/fix_lxd_inventory_hostname.patch.txt)
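The attached patch is not reproduced here. The usual remedy for this warning is to list `inventory_hostname` as a `vars` source for `remote_addr`, so the templated per-host value wins over the literal default string; presumably that is what the linked PR and the attachment do, but the exact lines below are an assumption, shown only as a sketch of the option block in `plugins/connection/lxd.py`:

```python
# Hedged sketch only, not the actual patch.
REMOTE_ADDR_OPTION_SKETCH = '''
    remote_addr:
        description:
            - Container identifier.
        default: inventory_hostname
        vars:
            - name: inventory_hostname   # assumed addition
            - name: ansible_host
            - name: ansible_lxd_host
'''
```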
### Issue Type
Bug Report
### Component Name
plugins/connection/lxd.py
### Ansible Version
```console (paste below)
$ ansible --version
ansible [core 2.13.1]
config file = /home/anton/ansible-collection-oit-ne-servers/roles/common/ansible.cfg
configured module search path = ['/home/anton/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/anton/ansible-collection-oit-ne-servers/.venv/lib/python3.10/site-packages/ansible
ansible collection location = /home/anton/ansible-collection-oit-ne-servers/roles/common/.collections
executable location = /home/anton/ansible-collection-oit-ne-servers/.venv/bin/ansible
python version = 3.10.5 (main, Jun 11 2022, 16:53:24) [GCC 9.4.0]
jinja version = 3.1.2
libyaml = False
```
### Community.general Version
```console (paste below)
$ ansible-galaxy collection list community.general
Collection Version
----------------- -------
community.general 5.2.0
```
### Configuration
```console (paste below)
$ ansible-config dump --only-changed
```
### OS / Environment
cat /etc/os-release
NAME="Ubuntu"
VERSION="20.04.4 LTS (Focal Fossa)"
ID=ubuntu
ID_LIKE=debian
PRETTY_NAME="Ubuntu 20.04.4 LTS"
VERSION_ID="20.04"
HOME_URL="https://www.ubuntu.com/"
SUPPORT_URL="https://help.ubuntu.com/"
BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
VERSION_CODENAME=focal
UBUNTU_CODENAME=focal
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```console
$ mkdir tmp
$ cd tmp
$ python3 -m venv .venv
$ . .venv/bin/activate
$ python3 -m pip install --upgrade pip setuptools wheel
$ python3 -m pip install ansible molecule molecule-lxd
$ molecule init role tmp.common --driver-name lxd
$ cd common
```
Modify `molecule/default/molecule.yml`:
```yaml (paste below)
dependency:
name: galaxy
driver:
name: lxd
platforms:
- name: centos-stream-8
source:
type: image
mode: pull
server: https://images.linuxcontainers.org
protocol: simplestreams
alias: centos/8-Stream/amd64
profiles: ["default"]
provisioner:
name: ansible
verifier:
name: ansible
```
```console
$ molecule create
```
### Expected Results
I expected that the lxd container would be properly created and prepared.
### Actual Results
```console (paste below)
PLAY [Prepare] *****************************************************************
TASK [Install basic packages to bare containers] *******************************
[WARNING]: The "lxd" connection plugin has an improperly configured remote
target value, forcing "inventory_hostname" templated value instead of the
string
fatal: [centos-stream-8]: FAILED! => {"changed": true, "msg": "non-zero return code", "rc": 1, "stderr": "Error: Instance not found\n", "stderr_lines": ["Error: Instance not found"], "stdout": "", "stdout_lines": []}
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/connection/lxd.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # (c) 2016 Matt Clay <[email protected]>
3 # (c) 2017 Ansible Project
4 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
5
6 from __future__ import (absolute_import, division, print_function)
7 __metaclass__ = type
8
9 DOCUMENTATION = '''
10 author: Matt Clay (@mattclay) <[email protected]>
11 name: lxd
12 short_description: Run tasks in lxc containers via lxc CLI
13 description:
14 - Run commands or put/fetch files to an existing lxc container using lxc CLI
15 options:
16 remote_addr:
17 description:
18 - Container identifier.
19 default: inventory_hostname
20 vars:
21 - name: ansible_host
22 - name: ansible_lxd_host
23 executable:
24 description:
25 - shell to use for execution inside container
26 default: /bin/sh
27 vars:
28 - name: ansible_executable
29 - name: ansible_lxd_executable
30 remote:
31 description:
32 - Name of the LXD remote to use.
33 default: local
34 vars:
35 - name: ansible_lxd_remote
36 version_added: 2.0.0
37 project:
38 description:
39 - Name of the LXD project to use.
40 vars:
41 - name: ansible_lxd_project
42 version_added: 2.0.0
43 '''
44
45 import os
46 from subprocess import Popen, PIPE
47
48 from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
49 from ansible.module_utils.common.process import get_bin_path
50 from ansible.module_utils.common.text.converters import to_bytes, to_text
51 from ansible.plugins.connection import ConnectionBase
52
53
54 class Connection(ConnectionBase):
55 """ lxd based connections """
56
57 transport = 'community.general.lxd'
58 has_pipelining = True
59 default_user = 'root'
60
61 def __init__(self, play_context, new_stdin, *args, **kwargs):
62 super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
63
64 self._host = self._play_context.remote_addr
65 try:
66 self._lxc_cmd = get_bin_path("lxc")
67 except ValueError:
68 raise AnsibleError("lxc command not found in PATH")
69
70 if self._play_context.remote_user is not None and self._play_context.remote_user != 'root':
71 self._display.warning('lxd does not support remote_user, using container default: root')
72
73 def _connect(self):
74 """connect to lxd (nothing to do here) """
75 super(Connection, self)._connect()
76
77 if not self._connected:
78 self._display.vvv(u"ESTABLISH LXD CONNECTION FOR USER: root", host=self._host)
79 self._connected = True
80
81 def exec_command(self, cmd, in_data=None, sudoable=True):
82 """ execute a command on the lxd host """
83 super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
84
85 self._display.vvv(u"EXEC {0}".format(cmd), host=self._host)
86
87 local_cmd = [self._lxc_cmd]
88 if self.get_option("project"):
89 local_cmd.extend(["--project", self.get_option("project")])
90 local_cmd.extend([
91 "exec",
92 "%s:%s" % (self.get_option("remote"), self.get_option("remote_addr")),
93 "--",
94 self.get_option("executable"), "-c", cmd
95 ])
96
97 local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
98 in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
99
100 process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
101 stdout, stderr = process.communicate(in_data)
102
103 stdout = to_text(stdout)
104 stderr = to_text(stderr)
105
106 if stderr == "error: Container is not running.\n":
107 raise AnsibleConnectionFailure("container not running: %s" % self._host)
108
109 if stderr == "error: not found\n":
110 raise AnsibleConnectionFailure("container not found: %s" % self._host)
111
112 return process.returncode, stdout, stderr
113
114 def put_file(self, in_path, out_path):
115 """ put a file from local to lxd """
116 super(Connection, self).put_file(in_path, out_path)
117
118 self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self._host)
119
120 if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
121 raise AnsibleFileNotFound("input path is not a file: %s" % in_path)
122
123 local_cmd = [self._lxc_cmd]
124 if self.get_option("project"):
125 local_cmd.extend(["--project", self.get_option("project")])
126 local_cmd.extend([
127 "file", "push",
128 in_path,
129 "%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), out_path)
130 ])
131
132 local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
133
134 process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
135 process.communicate()
136
137 def fetch_file(self, in_path, out_path):
138 """ fetch a file from lxd to local """
139 super(Connection, self).fetch_file(in_path, out_path)
140
141 self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host)
142
143 local_cmd = [self._lxc_cmd]
144 if self.get_option("project"):
145 local_cmd.extend(["--project", self.get_option("project")])
146 local_cmd.extend([
147 "file", "pull",
148 "%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), in_path),
149 out_path
150 ])
151
152 local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
153
154 process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
155 process.communicate()
156
157 def close(self):
158 """ close the connection (nothing to do here) """
159 super(Connection, self).close()
160
161 self._connected = False
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugins/connection/lxd.py b/plugins/connection/lxd.py
--- a/plugins/connection/lxd.py
+++ b/plugins/connection/lxd.py
@@ -18,6 +18,7 @@
- Container identifier.
default: inventory_hostname
vars:
+ - name: inventory_hostname
- name: ansible_host
- name: ansible_lxd_host
executable:
@@ -61,7 +62,6 @@
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
- self._host = self._play_context.remote_addr
try:
self._lxc_cmd = get_bin_path("lxc")
except ValueError:
@@ -75,14 +75,14 @@
super(Connection, self)._connect()
if not self._connected:
- self._display.vvv(u"ESTABLISH LXD CONNECTION FOR USER: root", host=self._host)
+ self._display.vvv(u"ESTABLISH LXD CONNECTION FOR USER: root", host=self.get_option('remote_addr'))
self._connected = True
def exec_command(self, cmd, in_data=None, sudoable=True):
""" execute a command on the lxd host """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
- self._display.vvv(u"EXEC {0}".format(cmd), host=self._host)
+ self._display.vvv(u"EXEC {0}".format(cmd), host=self.get_option('remote_addr'))
local_cmd = [self._lxc_cmd]
if self.get_option("project"):
@@ -104,10 +104,10 @@
stderr = to_text(stderr)
if stderr == "error: Container is not running.\n":
- raise AnsibleConnectionFailure("container not running: %s" % self._host)
+ raise AnsibleConnectionFailure("container not running: %s" % self.get_option('remote_addr'))
if stderr == "error: not found\n":
- raise AnsibleConnectionFailure("container not found: %s" % self._host)
+ raise AnsibleConnectionFailure("container not found: %s" % self.get_option('remote_addr'))
return process.returncode, stdout, stderr
@@ -115,7 +115,7 @@
""" put a file from local to lxd """
super(Connection, self).put_file(in_path, out_path)
- self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self._host)
+ self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.get_option('remote_addr'))
if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound("input path is not a file: %s" % in_path)
@@ -138,7 +138,7 @@
""" fetch a file from lxd to local """
super(Connection, self).fetch_file(in_path, out_path)
- self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host)
+ self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.get_option('remote_addr'))
local_cmd = [self._lxc_cmd]
if self.get_option("project"):
|
{"golden_diff": "diff --git a/plugins/connection/lxd.py b/plugins/connection/lxd.py\n--- a/plugins/connection/lxd.py\n+++ b/plugins/connection/lxd.py\n@@ -18,6 +18,7 @@\n - Container identifier.\n default: inventory_hostname\n vars:\n+ - name: inventory_hostname\n - name: ansible_host\n - name: ansible_lxd_host\n executable:\n@@ -61,7 +62,6 @@\n def __init__(self, play_context, new_stdin, *args, **kwargs):\n super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)\n \n- self._host = self._play_context.remote_addr\n try:\n self._lxc_cmd = get_bin_path(\"lxc\")\n except ValueError:\n@@ -75,14 +75,14 @@\n super(Connection, self)._connect()\n \n if not self._connected:\n- self._display.vvv(u\"ESTABLISH LXD CONNECTION FOR USER: root\", host=self._host)\n+ self._display.vvv(u\"ESTABLISH LXD CONNECTION FOR USER: root\", host=self.get_option('remote_addr'))\n self._connected = True\n \n def exec_command(self, cmd, in_data=None, sudoable=True):\n \"\"\" execute a command on the lxd host \"\"\"\n super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)\n \n- self._display.vvv(u\"EXEC {0}\".format(cmd), host=self._host)\n+ self._display.vvv(u\"EXEC {0}\".format(cmd), host=self.get_option('remote_addr'))\n \n local_cmd = [self._lxc_cmd]\n if self.get_option(\"project\"):\n@@ -104,10 +104,10 @@\n stderr = to_text(stderr)\n \n if stderr == \"error: Container is not running.\\n\":\n- raise AnsibleConnectionFailure(\"container not running: %s\" % self._host)\n+ raise AnsibleConnectionFailure(\"container not running: %s\" % self.get_option('remote_addr'))\n \n if stderr == \"error: not found\\n\":\n- raise AnsibleConnectionFailure(\"container not found: %s\" % self._host)\n+ raise AnsibleConnectionFailure(\"container not found: %s\" % self.get_option('remote_addr'))\n \n return process.returncode, stdout, stderr\n \n@@ -115,7 +115,7 @@\n \"\"\" put a file from local to lxd \"\"\"\n super(Connection, self).put_file(in_path, out_path)\n \n- self._display.vvv(u\"PUT {0} TO {1}\".format(in_path, out_path), host=self._host)\n+ self._display.vvv(u\"PUT {0} TO {1}\".format(in_path, out_path), host=self.get_option('remote_addr'))\n \n if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):\n raise AnsibleFileNotFound(\"input path is not a file: %s\" % in_path)\n@@ -138,7 +138,7 @@\n \"\"\" fetch a file from lxd to local \"\"\"\n super(Connection, self).fetch_file(in_path, out_path)\n \n- self._display.vvv(u\"FETCH {0} TO {1}\".format(in_path, out_path), host=self._host)\n+ self._display.vvv(u\"FETCH {0} TO {1}\".format(in_path, out_path), host=self.get_option('remote_addr'))\n \n local_cmd = [self._lxc_cmd]\n if self.get_option(\"project\"):\n", "issue": "community.general.lxd connection not working with molecule\n### Summary\r\n\r\nWhen I try to run `molecule create` with the [lxd driver](https://github.com/ansible-community/molecule-lxd), it creates the lxc container correctly, but then gives a warning and then fails to run a command on the container.\r\n```\r\n[WARNING]: The \"ansible_collections.community.general.plugins.connection.lxd\" connection plugin has an improperly configured remote target value, forcing \"inventory_hostname\" templated value instead of the string\r\n```\r\nAfter some debugging, I found that the `remote_addr` value was being set to the literal string 'inventory_hostname' instead of the value of the current host's `inventory_hostname`. 
I found another connection plugin that had [fixed a similar issue](https://github.com/ansible/ansible/pull/77894).\r\n\r\nApplying this patch to the `plugins/connection/lxd.py` file fixes the problem.\r\n\r\n[fix_lxd_inventory_hostname.patch.txt](https://github.com/ansible-collections/community.general/files/8960273/fix_lxd_inventory_hostname.patch.txt)\r\n\r\n\r\n### Issue Type\r\n\r\nBug Report\r\n\r\n### Component Name\r\n\r\nplugins/connection/lxd.py\r\n\r\n### Ansible Version\r\n\r\n```console (paste below)\r\n$ ansible --version\r\nansible [core 2.13.1]\r\n config file = /home/anton/ansible-collection-oit-ne-servers/roles/common/ansible.cfg\r\n configured module search path = ['/home/anton/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /home/anton/ansible-collection-oit-ne-servers/.venv/lib/python3.10/site-packages/ansible\r\n ansible collection location = /home/anton/ansible-collection-oit-ne-servers/roles/common/.collections\r\n executable location = /home/anton/ansible-collection-oit-ne-servers/.venv/bin/ansible\r\n python version = 3.10.5 (main, Jun 11 2022, 16:53:24) [GCC 9.4.0]\r\n jinja version = 3.1.2\r\n libyaml = False\r\n\r\n```\r\n\r\n\r\n### Community.general Version\r\n\r\n```console (paste below)\r\n$ ansible-galaxy collection list community.general\r\nCollection Version\r\n----------------- -------\r\ncommunity.general 5.2.0 \r\n```\r\n\r\n\r\n### Configuration\r\n\r\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\n\r\n```\r\n\r\n\r\n### OS / Environment\r\n\r\ncat /etc/os-release \r\nNAME=\"Ubuntu\"\r\nVERSION=\"20.04.4 LTS (Focal Fossa)\"\r\nID=ubuntu\r\nID_LIKE=debian\r\nPRETTY_NAME=\"Ubuntu 20.04.4 LTS\"\r\nVERSION_ID=\"20.04\"\r\nHOME_URL=\"https://www.ubuntu.com/\"\r\nSUPPORT_URL=\"https://help.ubuntu.com/\"\r\nBUG_REPORT_URL=\"https://bugs.launchpad.net/ubuntu/\"\r\nPRIVACY_POLICY_URL=\"https://www.ubuntu.com/legal/terms-and-policies/privacy-policy\"\r\nVERSION_CODENAME=focal\r\nUBUNTU_CODENAME=focal\r\n\r\n### Steps to Reproduce\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```console\r\n$ mkdir tmp\r\n$ cd tmp\r\n$ python3 -m venv .venv\r\n$ . .venv/bin/activate\r\n$ python3 -m pip install --upgrade pip setuptools wheel\r\n$ python3 -m pip install ansible molecule molecule-lxd\r\n$ molecule init role tmp.common --driver-name lxd\r\n$ cd common\r\n```\r\n\r\nModify `molecule/default/molecule.yml`:\r\n```yaml (paste below)\r\ndependency:\r\n name: galaxy\r\ndriver:\r\n name: lxd\r\nplatforms:\r\n - name: centos-stream-8\r\n source:\r\n type: image\r\n mode: pull\r\n server: https://images.linuxcontainers.org\r\n protocol: simplestreams\r\n alias: centos/8-Stream/amd64\r\n profiles: [\"default\"]\r\nprovisioner:\r\n name: ansible\r\nverifier:\r\n name: ansible\r\n```\r\n\r\n```console\r\n$ molecule create\r\n```\r\n\r\n### Expected Results\r\n\r\nI expected that the lxd container would be properly created and prepared.\r\n\r\n### Actual Results\r\n\r\n```console (paste below)\r\nPLAY [Prepare] *****************************************************************\r\n\r\nTASK [Install basic packages to bare containers] *******************************\r\n[WARNING]: The \"lxd\" connection plugin has an improperly configured remote\r\ntarget value, forcing \"inventory_hostname\" templated value instead of the\r\nstring\r\nfatal: [centos-stream-8]: FAILED! 
=> {\"changed\": true, \"msg\": \"non-zero return code\", \"rc\": 1, \"stderr\": \"Error: Instance not found\\n\", \"stderr_lines\": [\"Error: Instance not found\"], \"stdout\": \"\", \"stdout_lines\": []}\r\n\r\n```\r\n\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# (c) 2016 Matt Clay <[email protected]>\n# (c) 2017 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = '''\n author: Matt Clay (@mattclay) <[email protected]>\n name: lxd\n short_description: Run tasks in lxc containers via lxc CLI\n description:\n - Run commands or put/fetch files to an existing lxc container using lxc CLI\n options:\n remote_addr:\n description:\n - Container identifier.\n default: inventory_hostname\n vars:\n - name: ansible_host\n - name: ansible_lxd_host\n executable:\n description:\n - shell to use for execution inside container\n default: /bin/sh\n vars:\n - name: ansible_executable\n - name: ansible_lxd_executable\n remote:\n description:\n - Name of the LXD remote to use.\n default: local\n vars:\n - name: ansible_lxd_remote\n version_added: 2.0.0\n project:\n description:\n - Name of the LXD project to use.\n vars:\n - name: ansible_lxd_project\n version_added: 2.0.0\n'''\n\nimport os\nfrom subprocess import Popen, PIPE\n\nfrom ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound\nfrom ansible.module_utils.common.process import get_bin_path\nfrom ansible.module_utils.common.text.converters import to_bytes, to_text\nfrom ansible.plugins.connection import ConnectionBase\n\n\nclass Connection(ConnectionBase):\n \"\"\" lxd based connections \"\"\"\n\n transport = 'community.general.lxd'\n has_pipelining = True\n default_user = 'root'\n\n def __init__(self, play_context, new_stdin, *args, **kwargs):\n super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)\n\n self._host = self._play_context.remote_addr\n try:\n self._lxc_cmd = get_bin_path(\"lxc\")\n except ValueError:\n raise AnsibleError(\"lxc command not found in PATH\")\n\n if self._play_context.remote_user is not None and self._play_context.remote_user != 'root':\n self._display.warning('lxd does not support remote_user, using container default: root')\n\n def _connect(self):\n \"\"\"connect to lxd (nothing to do here) \"\"\"\n super(Connection, self)._connect()\n\n if not self._connected:\n self._display.vvv(u\"ESTABLISH LXD CONNECTION FOR USER: root\", host=self._host)\n self._connected = True\n\n def exec_command(self, cmd, in_data=None, sudoable=True):\n \"\"\" execute a command on the lxd host \"\"\"\n super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)\n\n self._display.vvv(u\"EXEC {0}\".format(cmd), host=self._host)\n\n local_cmd = [self._lxc_cmd]\n if self.get_option(\"project\"):\n local_cmd.extend([\"--project\", self.get_option(\"project\")])\n local_cmd.extend([\n \"exec\",\n \"%s:%s\" % (self.get_option(\"remote\"), self.get_option(\"remote_addr\")),\n \"--\",\n self.get_option(\"executable\"), \"-c\", cmd\n ])\n\n local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]\n in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')\n\n process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n stdout, stderr = process.communicate(in_data)\n\n stdout = 
to_text(stdout)\n stderr = to_text(stderr)\n\n if stderr == \"error: Container is not running.\\n\":\n raise AnsibleConnectionFailure(\"container not running: %s\" % self._host)\n\n if stderr == \"error: not found\\n\":\n raise AnsibleConnectionFailure(\"container not found: %s\" % self._host)\n\n return process.returncode, stdout, stderr\n\n def put_file(self, in_path, out_path):\n \"\"\" put a file from local to lxd \"\"\"\n super(Connection, self).put_file(in_path, out_path)\n\n self._display.vvv(u\"PUT {0} TO {1}\".format(in_path, out_path), host=self._host)\n\n if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):\n raise AnsibleFileNotFound(\"input path is not a file: %s\" % in_path)\n\n local_cmd = [self._lxc_cmd]\n if self.get_option(\"project\"):\n local_cmd.extend([\"--project\", self.get_option(\"project\")])\n local_cmd.extend([\n \"file\", \"push\",\n in_path,\n \"%s:%s/%s\" % (self.get_option(\"remote\"), self.get_option(\"remote_addr\"), out_path)\n ])\n\n local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]\n\n process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n process.communicate()\n\n def fetch_file(self, in_path, out_path):\n \"\"\" fetch a file from lxd to local \"\"\"\n super(Connection, self).fetch_file(in_path, out_path)\n\n self._display.vvv(u\"FETCH {0} TO {1}\".format(in_path, out_path), host=self._host)\n\n local_cmd = [self._lxc_cmd]\n if self.get_option(\"project\"):\n local_cmd.extend([\"--project\", self.get_option(\"project\")])\n local_cmd.extend([\n \"file\", \"pull\",\n \"%s:%s/%s\" % (self.get_option(\"remote\"), self.get_option(\"remote_addr\"), in_path),\n out_path\n ])\n\n local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]\n\n process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n process.communicate()\n\n def close(self):\n \"\"\" close the connection (nothing to do here) \"\"\"\n super(Connection, self).close()\n\n self._connected = False\n", "path": "plugins/connection/lxd.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# (c) 2016 Matt Clay <[email protected]>\n# (c) 2017 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = '''\n author: Matt Clay (@mattclay) <[email protected]>\n name: lxd\n short_description: Run tasks in lxc containers via lxc CLI\n description:\n - Run commands or put/fetch files to an existing lxc container using lxc CLI\n options:\n remote_addr:\n description:\n - Container identifier.\n default: inventory_hostname\n vars:\n - name: inventory_hostname\n - name: ansible_host\n - name: ansible_lxd_host\n executable:\n description:\n - shell to use for execution inside container\n default: /bin/sh\n vars:\n - name: ansible_executable\n - name: ansible_lxd_executable\n remote:\n description:\n - Name of the LXD remote to use.\n default: local\n vars:\n - name: ansible_lxd_remote\n version_added: 2.0.0\n project:\n description:\n - Name of the LXD project to use.\n vars:\n - name: ansible_lxd_project\n version_added: 2.0.0\n'''\n\nimport os\nfrom subprocess import Popen, PIPE\n\nfrom ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound\nfrom ansible.module_utils.common.process import get_bin_path\nfrom ansible.module_utils.common.text.converters import to_bytes, to_text\nfrom ansible.plugins.connection import ConnectionBase\n\n\nclass 
Connection(ConnectionBase):\n \"\"\" lxd based connections \"\"\"\n\n transport = 'community.general.lxd'\n has_pipelining = True\n default_user = 'root'\n\n def __init__(self, play_context, new_stdin, *args, **kwargs):\n super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)\n\n try:\n self._lxc_cmd = get_bin_path(\"lxc\")\n except ValueError:\n raise AnsibleError(\"lxc command not found in PATH\")\n\n if self._play_context.remote_user is not None and self._play_context.remote_user != 'root':\n self._display.warning('lxd does not support remote_user, using container default: root')\n\n def _connect(self):\n \"\"\"connect to lxd (nothing to do here) \"\"\"\n super(Connection, self)._connect()\n\n if not self._connected:\n self._display.vvv(u\"ESTABLISH LXD CONNECTION FOR USER: root\", host=self.get_option('remote_addr'))\n self._connected = True\n\n def exec_command(self, cmd, in_data=None, sudoable=True):\n \"\"\" execute a command on the lxd host \"\"\"\n super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)\n\n self._display.vvv(u\"EXEC {0}\".format(cmd), host=self.get_option('remote_addr'))\n\n local_cmd = [self._lxc_cmd]\n if self.get_option(\"project\"):\n local_cmd.extend([\"--project\", self.get_option(\"project\")])\n local_cmd.extend([\n \"exec\",\n \"%s:%s\" % (self.get_option(\"remote\"), self.get_option(\"remote_addr\")),\n \"--\",\n self.get_option(\"executable\"), \"-c\", cmd\n ])\n\n local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]\n in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')\n\n process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n stdout, stderr = process.communicate(in_data)\n\n stdout = to_text(stdout)\n stderr = to_text(stderr)\n\n if stderr == \"error: Container is not running.\\n\":\n raise AnsibleConnectionFailure(\"container not running: %s\" % self.get_option('remote_addr'))\n\n if stderr == \"error: not found\\n\":\n raise AnsibleConnectionFailure(\"container not found: %s\" % self.get_option('remote_addr'))\n\n return process.returncode, stdout, stderr\n\n def put_file(self, in_path, out_path):\n \"\"\" put a file from local to lxd \"\"\"\n super(Connection, self).put_file(in_path, out_path)\n\n self._display.vvv(u\"PUT {0} TO {1}\".format(in_path, out_path), host=self.get_option('remote_addr'))\n\n if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):\n raise AnsibleFileNotFound(\"input path is not a file: %s\" % in_path)\n\n local_cmd = [self._lxc_cmd]\n if self.get_option(\"project\"):\n local_cmd.extend([\"--project\", self.get_option(\"project\")])\n local_cmd.extend([\n \"file\", \"push\",\n in_path,\n \"%s:%s/%s\" % (self.get_option(\"remote\"), self.get_option(\"remote_addr\"), out_path)\n ])\n\n local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]\n\n process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n process.communicate()\n\n def fetch_file(self, in_path, out_path):\n \"\"\" fetch a file from lxd to local \"\"\"\n super(Connection, self).fetch_file(in_path, out_path)\n\n self._display.vvv(u\"FETCH {0} TO {1}\".format(in_path, out_path), host=self.get_option('remote_addr'))\n\n local_cmd = [self._lxc_cmd]\n if self.get_option(\"project\"):\n local_cmd.extend([\"--project\", self.get_option(\"project\")])\n local_cmd.extend([\n \"file\", \"pull\",\n \"%s:%s/%s\" % (self.get_option(\"remote\"), self.get_option(\"remote_addr\"), in_path),\n out_path\n ])\n\n local_cmd = [to_bytes(i, 
errors='surrogate_or_strict') for i in local_cmd]\n\n process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n process.communicate()\n\n def close(self):\n \"\"\" close the connection (nothing to do here) \"\"\"\n super(Connection, self).close()\n\n self._connected = False\n", "path": "plugins/connection/lxd.py"}]}
| 3,095 | 787 |
gh_patches_debug_8441
|
rasdani/github-patches
|
git_diff
|
pyqtgraph__pyqtgraph-953
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HistogramLUTWidget kargs bug?
# lut_widget = HistogramLUTWidget(background='w')
File "/usr/local/lib/python3.4/dist-packages/pyqtgraph-0.9.8-py3.4.egg/pyqtgraph/widgets/HistogramLUTWidget.py", line 18, in **init**
self.item = HistogramLUTItem(_args, *_kargs)
# TypeError: **init**() got an unexpected keyword argument 'background'
I can fix it by:
class HistogramLUTWidget(pg.GraphicsView):
```
def __init__(self, parent=None, *args, **kargs):
# background = kargs.get('background', 'default')
background = kargs.pop('background', 'default')
```
...
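As an illustrative aside (plain dicts, no pyqtgraph involved): `dict.get` leaves `'background'` inside `kargs`, so it is still forwarded through `**kargs`, whereas `dict.pop` removes it before forwarding.

```python
# Illustrative only: get() keeps the key in kargs, pop() removes it before forwarding.
def downstream(**kargs):
    if "background" in kargs:
        raise TypeError("__init__() got an unexpected keyword argument 'background'")
    return "ok"

kargs = {"background": "w"}
background = kargs.get("background", "default")   # key still present in kargs
try:
    downstream(**kargs)
except TypeError as exc:
    print(exc)                                     # reproduces the reported TypeError

kargs = {"background": "w"}
background = kargs.pop("background", "default")   # key consumed here
print(downstream(**kargs))                         # -> ok
```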
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyqtgraph/widgets/HistogramLUTWidget.py`
Content:
```
1 """
2 Widget displaying an image histogram along with gradient editor. Can be used to adjust the appearance of images.
3 This is a wrapper around HistogramLUTItem
4 """
5
6 from ..Qt import QtGui, QtCore
7 from .GraphicsView import GraphicsView
8 from ..graphicsItems.HistogramLUTItem import HistogramLUTItem
9
10 __all__ = ['HistogramLUTWidget']
11
12
13 class HistogramLUTWidget(GraphicsView):
14
15 def __init__(self, parent=None, *args, **kargs):
16 background = kargs.get('background', 'default')
17 GraphicsView.__init__(self, parent, useOpenGL=False, background=background)
18 self.item = HistogramLUTItem(*args, **kargs)
19 self.setCentralItem(self.item)
20 self.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
21 self.setMinimumWidth(95)
22
23
24 def sizeHint(self):
25 return QtCore.QSize(115, 200)
26
27
28
29 def __getattr__(self, attr):
30 return getattr(self.item, attr)
31
32
33
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyqtgraph/widgets/HistogramLUTWidget.py b/pyqtgraph/widgets/HistogramLUTWidget.py
--- a/pyqtgraph/widgets/HistogramLUTWidget.py
+++ b/pyqtgraph/widgets/HistogramLUTWidget.py
@@ -13,7 +13,7 @@
class HistogramLUTWidget(GraphicsView):
def __init__(self, parent=None, *args, **kargs):
- background = kargs.get('background', 'default')
+ background = kargs.pop('background', 'default')
GraphicsView.__init__(self, parent, useOpenGL=False, background=background)
self.item = HistogramLUTItem(*args, **kargs)
self.setCentralItem(self.item)
|
{"golden_diff": "diff --git a/pyqtgraph/widgets/HistogramLUTWidget.py b/pyqtgraph/widgets/HistogramLUTWidget.py\n--- a/pyqtgraph/widgets/HistogramLUTWidget.py\n+++ b/pyqtgraph/widgets/HistogramLUTWidget.py\n@@ -13,7 +13,7 @@\n class HistogramLUTWidget(GraphicsView):\n \n def __init__(self, parent=None, *args, **kargs):\n- background = kargs.get('background', 'default')\n+ background = kargs.pop('background', 'default')\n GraphicsView.__init__(self, parent, useOpenGL=False, background=background)\n self.item = HistogramLUTItem(*args, **kargs)\n self.setCentralItem(self.item)\n", "issue": "HistogramLUTWidget kargs bug?\n# lut_widget = HistogramLUTWidget(background='w')\n\n File \"/usr/local/lib/python3.4/dist-packages/pyqtgraph-0.9.8-py3.4.egg/pyqtgraph/widgets/HistogramLUTWidget.py\", line 18, in **init**\n self.item = HistogramLUTItem(_args, *_kargs)\n# TypeError: **init**() got an unexpected keyword argument 'background'\n\nI can fix it by:\n\nclass HistogramLUTWidget(pg.GraphicsView):\n\n```\ndef __init__(self, parent=None, *args, **kargs):\n # background = kargs.get('background', 'default')\n background = kargs.pop('background', 'default')\n```\n\n...\n\n", "before_files": [{"content": "\"\"\"\nWidget displaying an image histogram along with gradient editor. Can be used to adjust the appearance of images.\nThis is a wrapper around HistogramLUTItem\n\"\"\"\n\nfrom ..Qt import QtGui, QtCore\nfrom .GraphicsView import GraphicsView\nfrom ..graphicsItems.HistogramLUTItem import HistogramLUTItem\n\n__all__ = ['HistogramLUTWidget']\n\n\nclass HistogramLUTWidget(GraphicsView):\n \n def __init__(self, parent=None, *args, **kargs):\n background = kargs.get('background', 'default')\n GraphicsView.__init__(self, parent, useOpenGL=False, background=background)\n self.item = HistogramLUTItem(*args, **kargs)\n self.setCentralItem(self.item)\n self.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)\n self.setMinimumWidth(95)\n \n\n def sizeHint(self):\n return QtCore.QSize(115, 200)\n \n \n\n def __getattr__(self, attr):\n return getattr(self.item, attr)\n\n\n\n", "path": "pyqtgraph/widgets/HistogramLUTWidget.py"}], "after_files": [{"content": "\"\"\"\nWidget displaying an image histogram along with gradient editor. Can be used to adjust the appearance of images.\nThis is a wrapper around HistogramLUTItem\n\"\"\"\n\nfrom ..Qt import QtGui, QtCore\nfrom .GraphicsView import GraphicsView\nfrom ..graphicsItems.HistogramLUTItem import HistogramLUTItem\n\n__all__ = ['HistogramLUTWidget']\n\n\nclass HistogramLUTWidget(GraphicsView):\n \n def __init__(self, parent=None, *args, **kargs):\n background = kargs.pop('background', 'default')\n GraphicsView.__init__(self, parent, useOpenGL=False, background=background)\n self.item = HistogramLUTItem(*args, **kargs)\n self.setCentralItem(self.item)\n self.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)\n self.setMinimumWidth(95)\n \n\n def sizeHint(self):\n return QtCore.QSize(115, 200)\n \n \n\n def __getattr__(self, attr):\n return getattr(self.item, attr)\n\n\n\n", "path": "pyqtgraph/widgets/HistogramLUTWidget.py"}]}
| 709 | 158 |
gh_patches_debug_6219
|
rasdani/github-patches
|
git_diff
|
openstates__openstates-scrapers-1502
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AR scraper failing since at least 2017-03-11
State: AR - scraper has been failing since 2017-03-11
Based on automated runs it appears that AR has not run successfully in 2 days (2017-03-11).
```/usr/local/bin/billy-update ar``` | **failed during bills**
```
Traceback (most recent call last):
File "/opt/openstates/billy/billy/bin/update.py", line 368, in main
run_record += _run_scraper(stype, args, metadata)
File "/opt/openstates/billy/billy/bin/update.py", line 102, in _run_scraper
scraper.scrape(chamber, time)
File "/srv/openstates-web/openstates/ar/bills.py", line 40, in scrape
self.save_bill(bill)
File "/opt/openstates/billy/billy/scrape/__init__.py", line 199, in save_object
self.validate_json(obj)
File "/opt/openstates/billy/billy/scrape/__init__.py", line 130, in validate_json
raise ve
FieldValidationError: Value u'' for field '<obj>.sponsors[0].name' cannot be blank'
```
Visit http://bobsled.openstates.org/ for more info.
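As a hedged aside (a standalone sketch, not the scraper itself), the validation error above goes away once an empty sponsor name is never recorded, for example by guarding the add call:

```python
# Illustrative only: record a primary sponsor only when a non-empty name is available.
def add_primary_sponsor(bill, primary, fallback):
    name = primary or fallback          # mirror the scraper's column fallback
    if name:                            # skip rows where both columns are blank
        bill.setdefault("sponsors", []).append({"type": "primary", "name": name})

bill = {}
add_primary_sponsor(bill, "", "")        # both blank -> nothing added, nothing to fail validation
add_primary_sponsor(bill, "", "Smith")   # fallback column used
print(bill)                              # {'sponsors': [{'type': 'primary', 'name': 'Smith'}]}
```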
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/ar/bills.py`
Content:
```
1 import re
2 import csv
3 import StringIO
4 import datetime
5
6 from billy.scrape.bills import BillScraper, Bill
7 from billy.scrape.votes import Vote
8
9 import lxml.html
10
11 import scrapelib
12
13
14 def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
15 # csv.py doesn't do Unicode; encode temporarily as UTF-8:
16 csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),
17 dialect=dialect, **kwargs)
18 for row in csv_reader:
19 # decode UTF-8 back to Unicode, cell by cell:
20 yield [unicode(cell, 'utf-8') for cell in row]
21
22
23 def utf_8_encoder(unicode_csv_data):
24 for line in unicode_csv_data:
25 yield line.encode('utf-8')
26
27
28 class ARBillScraper(BillScraper):
29 jurisdiction = 'ar'
30
31 def scrape(self, chamber, session):
32 self.bills = {}
33
34 self.slug = self.metadata['session_details'][session]['slug']
35
36 self.scrape_bill(chamber, session)
37 self.scrape_actions()
38
39 for bill in self.bills.itervalues():
40 self.save_bill(bill)
41
42 def scrape_bill(self, chamber, session):
43 url = "ftp://www.arkleg.state.ar.us/dfadooas/LegislativeMeasures.txt"
44 page = self.get(url).text
45 page = unicode_csv_reader(StringIO.StringIO(page), delimiter='|')
46
47 for row in page:
48 bill_chamber = {'H': 'lower', 'S': 'upper'}[row[0]]
49 if bill_chamber != chamber:
50 continue
51
52 bill_id = "%s%s %s" % (row[0], row[1], row[2])
53
54 type_spec = re.match(r'(H|S)([A-Z]+)\s', bill_id).group(2)
55 bill_type = {
56 'B': 'bill',
57 'R': 'resolution',
58 'JR': 'joint resolution',
59 'CR': 'concurrent resolution',
60 'MR': 'memorial resolution',
61 'CMR': 'concurrent memorial resolution'}[type_spec]
62
63 if row[-1] != self.slug:
64 continue
65
66 bill = Bill(session, chamber, bill_id, row[3], type=bill_type)
67 bill.add_source(url)
68
69 primary = row[11]
70 if not primary:
71 primary = row[12]
72 bill.add_sponsor('primary', primary)
73
74 # ftp://www.arkleg.state.ar.us/Bills/
75 # TODO: Keep on eye on this post 2017 to see if they apply R going forward.
76 session_code = '2017R' if session == '2017' else session
77
78 version_url = ("ftp://www.arkleg.state.ar.us/Bills/"
79 "%s/Public/%s.pdf" % (
80 session_code, bill_id.replace(' ', '')))
81 bill.add_version(bill_id, version_url, mimetype='application/pdf')
82
83 self.scrape_bill_page(bill)
84
85 self.bills[bill_id] = bill
86
87 def scrape_actions(self):
88 url = "ftp://www.arkleg.state.ar.us/dfadooas/ChamberActions.txt"
89 page = self.get(url).text
90 page = csv.reader(StringIO.StringIO(page))
91
92 for row in page:
93 bill_id = "%s%s %s" % (row[1], row[2], row[3])
94
95 if bill_id not in self.bills:
96 continue
97 # different term
98 if row[-2] != self.slug:
99 continue
100
101 # Commas aren't escaped, but only one field (the action) can
102 # contain them so we can work around it by using both positive
103 # and negative offsets
104 bill_id = "%s%s %s" % (row[1], row[2], row[3])
105 actor = {'HU': 'lower', 'SU': 'upper'}[row[-5].upper()]
106 # manual fix for crazy time value
107 row[6] = row[6].replace('.520000000', '')
108 date = datetime.datetime.strptime(row[6], "%Y-%m-%d %H:%M:%S")
109 action = ','.join(row[7:-5])
110
111 action_type = []
112 if action.startswith('Filed'):
113 action_type.append('bill:introduced')
114 elif (action.startswith('Read first time') or
115 action.startswith('Read the first time')):
116 action_type.append('bill:reading:1')
117 if re.match('Read the first time, .*, read the second time', action):
118 action_type.append('bill:reading:2')
119 elif action.startswith('Read the third time and passed'):
120 action_type.append('bill:passed')
121 action_type.append('bill:reading:3')
122 elif action.startswith('Read the third time'):
123 action_type.append('bill:reading:3')
124 elif action.startswith('DELIVERED TO GOVERNOR'):
125 action_type.append('governor:received')
126 elif action.startswith('Notification'):
127 action_type.append('governor:signed')
128
129 if 'referred to' in action:
130 action_type.append('committee:referred')
131
132 if 'Returned by the Committee' in action:
133 if 'recommendation that it Do Pass' in action:
134 action_type.append('committee:passed:favorable')
135 else:
136 action_type.append('committee:passed')
137
138 if re.match(r'Amendment No\. \d+ read and adopted', action):
139 action_type.append('amendment:introduced')
140 action_type.append('amendment:passed')
141
142 if not action:
143 action = '[No text provided]'
144 self.bills[bill_id].add_action(actor, action, date,
145 type=action_type or ['other'])
146
147 def scrape_bill_page(self, bill):
148 # We need to scrape each bill page in order to grab associated votes.
149 # It's still more efficient to get the rest of the data we're
150 # interested in from the CSVs, though, because their site splits
151 # other info (e.g. actions) across many pages
152 for t in self.metadata['terms']:
153 if bill['session'] in t['sessions']:
154 term_year = t['start_year']
155 break
156 measureno = bill['bill_id'].replace(' ', '')
157 url = ("http://www.arkleg.state.ar.us/assembly/%s/%s/"
158 "Pages/BillInformation.aspx?measureno=%s" % (
159 term_year, self.slug, measureno))
160 bill.add_source(url)
161
162 page = lxml.html.fromstring(self.get(url).text)
163 page.make_links_absolute(url)
164
165 for link in page.xpath("//a[contains(@href, 'Amendments')]"):
166 num = link.xpath("string(../../td[2])")
167 name = "Amendment %s" % num
168 bill.add_document(name, link.attrib['href'])
169
170 try:
171 cosponsor_link = page.xpath(
172 "//a[contains(@href, 'CoSponsors')]")[0]
173 self.scrape_cosponsors(bill, cosponsor_link.attrib['href'])
174 except IndexError:
175 # No cosponsor link is OK
176 pass
177
178 # hist_link = page.xpath("//a[contains(@href, 'BillStatusHistory')]")[0]
179 # self.scrape_votes(bill, hist_link.attrib['href'])
180
181 # def scrape_votes(self, bill, url):
182 # page = lxml.html.fromstring(self.get(url).text)
183 # page.make_links_absolute(url)
184
185 for link in page.xpath("//a[contains(@href, 'votes.aspx')]"):
186 date = link.xpath("string(../../td[2])")
187 date = datetime.datetime.strptime(date, "%m/%d/%Y %I:%M:%S %p")
188
189 motion = link.xpath("string(../../td[3])")
190
191 self.scrape_vote(bill, date, motion, link.attrib['href'])
192
193 def scrape_vote(self, bill, date, motion, url):
194 try:
195 page = self.get(url).text
196 except scrapelib.HTTPError:
197 #sometiems the link is there but is dead
198 return
199
200 if 'not yet official' in page:
201 # Sometimes they link to vote pages before they go live
202 return
203
204 page = lxml.html.fromstring(page)
205
206 if url.endswith('Senate'):
207 actor = 'upper'
208 else:
209 actor = 'lower'
210
211 count_path = "string(//td[@align = 'center' and contains(., '%s: ')])"
212 yes_count = int(page.xpath(count_path % "Yeas").split()[-1])
213 no_count = int(page.xpath(count_path % "Nays").split()[-1])
214 other_count = int(page.xpath(count_path % "Non Voting").split()[-1])
215 other_count += int(page.xpath(count_path % "Present").split()[-1])
216
217 passed = yes_count > no_count + other_count
218 vote = Vote(actor, date, motion, passed, yes_count,
219 no_count, other_count)
220 vote.add_source(url)
221
222 xpath = (
223 '//*[contains(@class, "ms-standardheader")]/'
224 'following-sibling::table')
225 divs = page.xpath(xpath)
226 votevals = 'yes no other other'.split()
227 for (voteval, div) in zip(votevals, divs):
228 for a in div.xpath('.//a'):
229 name = a.text_content().strip()
230 if not name:
231 continue
232 getattr(vote, voteval)(name)
233 bill.add_vote(vote)
234
235 def scrape_cosponsors(self, bill, url):
236 page = self.get(url).text
237 page = lxml.html.fromstring(page)
238
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/openstates/ar/bills.py b/openstates/ar/bills.py
--- a/openstates/ar/bills.py
+++ b/openstates/ar/bills.py
@@ -69,7 +69,8 @@
primary = row[11]
if not primary:
primary = row[12]
- bill.add_sponsor('primary', primary)
+ if primary:
+ bill.add_sponsor('primary', primary)
# ftp://www.arkleg.state.ar.us/Bills/
# TODO: Keep on eye on this post 2017 to see if they apply R going forward.
|
{"golden_diff": "diff --git a/openstates/ar/bills.py b/openstates/ar/bills.py\n--- a/openstates/ar/bills.py\n+++ b/openstates/ar/bills.py\n@@ -69,7 +69,8 @@\n primary = row[11]\n if not primary:\n primary = row[12]\n- bill.add_sponsor('primary', primary)\n+ if primary:\n+ bill.add_sponsor('primary', primary)\n \n # ftp://www.arkleg.state.ar.us/Bills/\n # TODO: Keep on eye on this post 2017 to see if they apply R going forward.\n", "issue": "AR scraper failing since at least 2017-03-11\nState: AR - scraper has been failing since 2017-03-11\n\nBased on automated runs it appears that AR has not run successfully in 2 days (2017-03-11).\n\n```/usr/local/bin/billy-update ar``` | **failed during bills**\n\n```\n Traceback (most recent call last):\n File \"/opt/openstates/billy/billy/bin/update.py\", line 368, in main\n run_record += _run_scraper(stype, args, metadata)\n File \"/opt/openstates/billy/billy/bin/update.py\", line 102, in _run_scraper\n scraper.scrape(chamber, time)\n File \"/srv/openstates-web/openstates/ar/bills.py\", line 40, in scrape\n self.save_bill(bill)\n File \"/opt/openstates/billy/billy/scrape/__init__.py\", line 199, in save_object\n self.validate_json(obj)\n File \"/opt/openstates/billy/billy/scrape/__init__.py\", line 130, in validate_json\n raise ve\nFieldValidationError: Value u'' for field '<obj>.sponsors[0].name' cannot be blank'\n\n```\n\nVisit http://bobsled.openstates.org/ for more info.\n\n", "before_files": [{"content": "import re\nimport csv\nimport StringIO\nimport datetime\n\nfrom billy.scrape.bills import BillScraper, Bill\nfrom billy.scrape.votes import Vote\n\nimport lxml.html\n\nimport scrapelib\n\n\ndef unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):\n # csv.py doesn't do Unicode; encode temporarily as UTF-8:\n csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),\n dialect=dialect, **kwargs)\n for row in csv_reader:\n # decode UTF-8 back to Unicode, cell by cell:\n yield [unicode(cell, 'utf-8') for cell in row]\n\n\ndef utf_8_encoder(unicode_csv_data):\n for line in unicode_csv_data:\n yield line.encode('utf-8')\n\n\nclass ARBillScraper(BillScraper):\n jurisdiction = 'ar'\n\n def scrape(self, chamber, session):\n self.bills = {}\n\n self.slug = self.metadata['session_details'][session]['slug']\n\n self.scrape_bill(chamber, session)\n self.scrape_actions()\n\n for bill in self.bills.itervalues():\n self.save_bill(bill)\n\n def scrape_bill(self, chamber, session):\n url = \"ftp://www.arkleg.state.ar.us/dfadooas/LegislativeMeasures.txt\"\n page = self.get(url).text\n page = unicode_csv_reader(StringIO.StringIO(page), delimiter='|')\n\n for row in page:\n bill_chamber = {'H': 'lower', 'S': 'upper'}[row[0]]\n if bill_chamber != chamber:\n continue\n\n bill_id = \"%s%s %s\" % (row[0], row[1], row[2])\n\n type_spec = re.match(r'(H|S)([A-Z]+)\\s', bill_id).group(2)\n bill_type = {\n 'B': 'bill',\n 'R': 'resolution',\n 'JR': 'joint resolution',\n 'CR': 'concurrent resolution',\n 'MR': 'memorial resolution',\n 'CMR': 'concurrent memorial resolution'}[type_spec]\n\n if row[-1] != self.slug:\n continue\n\n bill = Bill(session, chamber, bill_id, row[3], type=bill_type)\n bill.add_source(url)\n\n primary = row[11]\n if not primary:\n primary = row[12]\n bill.add_sponsor('primary', primary)\n\n # ftp://www.arkleg.state.ar.us/Bills/\n # TODO: Keep on eye on this post 2017 to see if they apply R going forward.\n session_code = '2017R' if session == '2017' else session\n\n version_url = (\"ftp://www.arkleg.state.ar.us/Bills/\"\n \"%s/Public/%s.pdf\" 
% (\n session_code, bill_id.replace(' ', '')))\n bill.add_version(bill_id, version_url, mimetype='application/pdf')\n\n self.scrape_bill_page(bill)\n\n self.bills[bill_id] = bill\n\n def scrape_actions(self):\n url = \"ftp://www.arkleg.state.ar.us/dfadooas/ChamberActions.txt\"\n page = self.get(url).text\n page = csv.reader(StringIO.StringIO(page))\n\n for row in page:\n bill_id = \"%s%s %s\" % (row[1], row[2], row[3])\n\n if bill_id not in self.bills:\n continue\n # different term\n if row[-2] != self.slug:\n continue\n\n # Commas aren't escaped, but only one field (the action) can\n # contain them so we can work around it by using both positive\n # and negative offsets\n bill_id = \"%s%s %s\" % (row[1], row[2], row[3])\n actor = {'HU': 'lower', 'SU': 'upper'}[row[-5].upper()]\n # manual fix for crazy time value\n row[6] = row[6].replace('.520000000', '')\n date = datetime.datetime.strptime(row[6], \"%Y-%m-%d %H:%M:%S\")\n action = ','.join(row[7:-5])\n\n action_type = []\n if action.startswith('Filed'):\n action_type.append('bill:introduced')\n elif (action.startswith('Read first time') or\n action.startswith('Read the first time')):\n action_type.append('bill:reading:1')\n if re.match('Read the first time, .*, read the second time', action):\n action_type.append('bill:reading:2')\n elif action.startswith('Read the third time and passed'):\n action_type.append('bill:passed')\n action_type.append('bill:reading:3')\n elif action.startswith('Read the third time'):\n action_type.append('bill:reading:3')\n elif action.startswith('DELIVERED TO GOVERNOR'):\n action_type.append('governor:received')\n elif action.startswith('Notification'):\n action_type.append('governor:signed')\n\n if 'referred to' in action:\n action_type.append('committee:referred')\n\n if 'Returned by the Committee' in action:\n if 'recommendation that it Do Pass' in action:\n action_type.append('committee:passed:favorable')\n else:\n action_type.append('committee:passed')\n\n if re.match(r'Amendment No\\. \\d+ read and adopted', action):\n action_type.append('amendment:introduced')\n action_type.append('amendment:passed')\n\n if not action:\n action = '[No text provided]'\n self.bills[bill_id].add_action(actor, action, date,\n type=action_type or ['other'])\n\n def scrape_bill_page(self, bill):\n # We need to scrape each bill page in order to grab associated votes.\n # It's still more efficient to get the rest of the data we're\n # interested in from the CSVs, though, because their site splits\n # other info (e.g. 
actions) across many pages\n for t in self.metadata['terms']:\n if bill['session'] in t['sessions']:\n term_year = t['start_year']\n break\n measureno = bill['bill_id'].replace(' ', '')\n url = (\"http://www.arkleg.state.ar.us/assembly/%s/%s/\"\n \"Pages/BillInformation.aspx?measureno=%s\" % (\n term_year, self.slug, measureno))\n bill.add_source(url)\n\n page = lxml.html.fromstring(self.get(url).text)\n page.make_links_absolute(url)\n\n for link in page.xpath(\"//a[contains(@href, 'Amendments')]\"):\n num = link.xpath(\"string(../../td[2])\")\n name = \"Amendment %s\" % num\n bill.add_document(name, link.attrib['href'])\n\n try:\n cosponsor_link = page.xpath(\n \"//a[contains(@href, 'CoSponsors')]\")[0]\n self.scrape_cosponsors(bill, cosponsor_link.attrib['href'])\n except IndexError:\n # No cosponsor link is OK\n pass\n\n # hist_link = page.xpath(\"//a[contains(@href, 'BillStatusHistory')]\")[0]\n # self.scrape_votes(bill, hist_link.attrib['href'])\n\n # def scrape_votes(self, bill, url):\n # page = lxml.html.fromstring(self.get(url).text)\n # page.make_links_absolute(url)\n\n for link in page.xpath(\"//a[contains(@href, 'votes.aspx')]\"):\n date = link.xpath(\"string(../../td[2])\")\n date = datetime.datetime.strptime(date, \"%m/%d/%Y %I:%M:%S %p\")\n\n motion = link.xpath(\"string(../../td[3])\")\n\n self.scrape_vote(bill, date, motion, link.attrib['href'])\n\n def scrape_vote(self, bill, date, motion, url):\n try:\n page = self.get(url).text\n except scrapelib.HTTPError:\n #sometiems the link is there but is dead\n return\n\n if 'not yet official' in page:\n # Sometimes they link to vote pages before they go live\n return\n\n page = lxml.html.fromstring(page)\n\n if url.endswith('Senate'):\n actor = 'upper'\n else:\n actor = 'lower'\n\n count_path = \"string(//td[@align = 'center' and contains(., '%s: ')])\"\n yes_count = int(page.xpath(count_path % \"Yeas\").split()[-1])\n no_count = int(page.xpath(count_path % \"Nays\").split()[-1])\n other_count = int(page.xpath(count_path % \"Non Voting\").split()[-1])\n other_count += int(page.xpath(count_path % \"Present\").split()[-1])\n\n passed = yes_count > no_count + other_count\n vote = Vote(actor, date, motion, passed, yes_count,\n no_count, other_count)\n vote.add_source(url)\n\n xpath = (\n '//*[contains(@class, \"ms-standardheader\")]/'\n 'following-sibling::table')\n divs = page.xpath(xpath)\n votevals = 'yes no other other'.split()\n for (voteval, div) in zip(votevals, divs):\n for a in div.xpath('.//a'):\n name = a.text_content().strip()\n if not name:\n continue\n getattr(vote, voteval)(name)\n bill.add_vote(vote)\n\n def scrape_cosponsors(self, bill, url):\n page = self.get(url).text\n page = lxml.html.fromstring(page)\n", "path": "openstates/ar/bills.py"}], "after_files": [{"content": "import re\nimport csv\nimport StringIO\nimport datetime\n\nfrom billy.scrape.bills import BillScraper, Bill\nfrom billy.scrape.votes import Vote\n\nimport lxml.html\n\nimport scrapelib\n\n\ndef unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):\n # csv.py doesn't do Unicode; encode temporarily as UTF-8:\n csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),\n dialect=dialect, **kwargs)\n for row in csv_reader:\n # decode UTF-8 back to Unicode, cell by cell:\n yield [unicode(cell, 'utf-8') for cell in row]\n\n\ndef utf_8_encoder(unicode_csv_data):\n for line in unicode_csv_data:\n yield line.encode('utf-8')\n\n\nclass ARBillScraper(BillScraper):\n jurisdiction = 'ar'\n\n def scrape(self, chamber, session):\n self.bills = {}\n\n 
self.slug = self.metadata['session_details'][session]['slug']\n\n self.scrape_bill(chamber, session)\n self.scrape_actions()\n\n for bill in self.bills.itervalues():\n self.save_bill(bill)\n\n def scrape_bill(self, chamber, session):\n url = \"ftp://www.arkleg.state.ar.us/dfadooas/LegislativeMeasures.txt\"\n page = self.get(url).text\n page = unicode_csv_reader(StringIO.StringIO(page), delimiter='|')\n\n for row in page:\n bill_chamber = {'H': 'lower', 'S': 'upper'}[row[0]]\n if bill_chamber != chamber:\n continue\n\n bill_id = \"%s%s %s\" % (row[0], row[1], row[2])\n\n type_spec = re.match(r'(H|S)([A-Z]+)\\s', bill_id).group(2)\n bill_type = {\n 'B': 'bill',\n 'R': 'resolution',\n 'JR': 'joint resolution',\n 'CR': 'concurrent resolution',\n 'MR': 'memorial resolution',\n 'CMR': 'concurrent memorial resolution'}[type_spec]\n\n if row[-1] != self.slug:\n continue\n\n bill = Bill(session, chamber, bill_id, row[3], type=bill_type)\n bill.add_source(url)\n\n primary = row[11]\n if not primary:\n primary = row[12]\n if primary:\n bill.add_sponsor('primary', primary)\n\n # ftp://www.arkleg.state.ar.us/Bills/\n # TODO: Keep on eye on this post 2017 to see if they apply R going forward.\n session_code = '2017R' if session == '2017' else session\n\n version_url = (\"ftp://www.arkleg.state.ar.us/Bills/\"\n \"%s/Public/%s.pdf\" % (\n session_code, bill_id.replace(' ', '')))\n bill.add_version(bill_id, version_url, mimetype='application/pdf')\n\n self.scrape_bill_page(bill)\n\n self.bills[bill_id] = bill\n\n def scrape_actions(self):\n url = \"ftp://www.arkleg.state.ar.us/dfadooas/ChamberActions.txt\"\n page = self.get(url).text\n page = csv.reader(StringIO.StringIO(page))\n\n for row in page:\n bill_id = \"%s%s %s\" % (row[1], row[2], row[3])\n\n if bill_id not in self.bills:\n continue\n # different term\n if row[-2] != self.slug:\n continue\n\n # Commas aren't escaped, but only one field (the action) can\n # contain them so we can work around it by using both positive\n # and negative offsets\n bill_id = \"%s%s %s\" % (row[1], row[2], row[3])\n actor = {'HU': 'lower', 'SU': 'upper'}[row[-5].upper()]\n # manual fix for crazy time value\n row[6] = row[6].replace('.520000000', '')\n date = datetime.datetime.strptime(row[6], \"%Y-%m-%d %H:%M:%S\")\n action = ','.join(row[7:-5])\n\n action_type = []\n if action.startswith('Filed'):\n action_type.append('bill:introduced')\n elif (action.startswith('Read first time') or\n action.startswith('Read the first time')):\n action_type.append('bill:reading:1')\n if re.match('Read the first time, .*, read the second time', action):\n action_type.append('bill:reading:2')\n elif action.startswith('Read the third time and passed'):\n action_type.append('bill:passed')\n action_type.append('bill:reading:3')\n elif action.startswith('Read the third time'):\n action_type.append('bill:reading:3')\n elif action.startswith('DELIVERED TO GOVERNOR'):\n action_type.append('governor:received')\n elif action.startswith('Notification'):\n action_type.append('governor:signed')\n\n if 'referred to' in action:\n action_type.append('committee:referred')\n\n if 'Returned by the Committee' in action:\n if 'recommendation that it Do Pass' in action:\n action_type.append('committee:passed:favorable')\n else:\n action_type.append('committee:passed')\n\n if re.match(r'Amendment No\\. 
\\d+ read and adopted', action):\n action_type.append('amendment:introduced')\n action_type.append('amendment:passed')\n\n if not action:\n action = '[No text provided]'\n self.bills[bill_id].add_action(actor, action, date,\n type=action_type or ['other'])\n\n def scrape_bill_page(self, bill):\n # We need to scrape each bill page in order to grab associated votes.\n # It's still more efficient to get the rest of the data we're\n # interested in from the CSVs, though, because their site splits\n # other info (e.g. actions) across many pages\n for t in self.metadata['terms']:\n if bill['session'] in t['sessions']:\n term_year = t['start_year']\n break\n measureno = bill['bill_id'].replace(' ', '')\n url = (\"http://www.arkleg.state.ar.us/assembly/%s/%s/\"\n \"Pages/BillInformation.aspx?measureno=%s\" % (\n term_year, self.slug, measureno))\n bill.add_source(url)\n\n page = lxml.html.fromstring(self.get(url).text)\n page.make_links_absolute(url)\n\n for link in page.xpath(\"//a[contains(@href, 'Amendments')]\"):\n num = link.xpath(\"string(../../td[2])\")\n name = \"Amendment %s\" % num\n bill.add_document(name, link.attrib['href'])\n\n try:\n cosponsor_link = page.xpath(\n \"//a[contains(@href, 'CoSponsors')]\")[0]\n self.scrape_cosponsors(bill, cosponsor_link.attrib['href'])\n except IndexError:\n # No cosponsor link is OK\n pass\n\n # hist_link = page.xpath(\"//a[contains(@href, 'BillStatusHistory')]\")[0]\n # self.scrape_votes(bill, hist_link.attrib['href'])\n\n # def scrape_votes(self, bill, url):\n # page = lxml.html.fromstring(self.get(url).text)\n # page.make_links_absolute(url)\n\n for link in page.xpath(\"//a[contains(@href, 'votes.aspx')]\"):\n date = link.xpath(\"string(../../td[2])\")\n date = datetime.datetime.strptime(date, \"%m/%d/%Y %I:%M:%S %p\")\n\n motion = link.xpath(\"string(../../td[3])\")\n\n self.scrape_vote(bill, date, motion, link.attrib['href'])\n\n def scrape_vote(self, bill, date, motion, url):\n try:\n page = self.get(url).text\n except scrapelib.HTTPError:\n #sometiems the link is there but is dead\n return\n\n if 'not yet official' in page:\n # Sometimes they link to vote pages before they go live\n return\n\n page = lxml.html.fromstring(page)\n\n if url.endswith('Senate'):\n actor = 'upper'\n else:\n actor = 'lower'\n\n count_path = \"string(//td[@align = 'center' and contains(., '%s: ')])\"\n yes_count = int(page.xpath(count_path % \"Yeas\").split()[-1])\n no_count = int(page.xpath(count_path % \"Nays\").split()[-1])\n other_count = int(page.xpath(count_path % \"Non Voting\").split()[-1])\n other_count += int(page.xpath(count_path % \"Present\").split()[-1])\n\n passed = yes_count > no_count + other_count\n vote = Vote(actor, date, motion, passed, yes_count,\n no_count, other_count)\n vote.add_source(url)\n\n xpath = (\n '//*[contains(@class, \"ms-standardheader\")]/'\n 'following-sibling::table')\n divs = page.xpath(xpath)\n votevals = 'yes no other other'.split()\n for (voteval, div) in zip(votevals, divs):\n for a in div.xpath('.//a'):\n name = a.text_content().strip()\n if not name:\n continue\n getattr(vote, voteval)(name)\n bill.add_vote(vote)\n\n def scrape_cosponsors(self, bill, url):\n page = self.get(url).text\n page = lxml.html.fromstring(page)\n", "path": "openstates/ar/bills.py"}]}
| 3,298 | 135 |
gh_patches_debug_16517
|
rasdani/github-patches
|
git_diff
|
ultrabug__py3status-113
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo in the keyboard_layout module
`xbklayout` function should be `xkblayout`, i.e. "kb" instead of "bk". This typo appears 3 times in total.
The rest of the code uses "kb" so I assumed what I found was a typo and decided to report it since it already caught my eye.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `py3status/modules/keyboard_layout.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Display the current keyboard layout.
4
5 Configuration parameters:
6 - cache_timeout: check for keyboard layout change every seconds
7
8 Requires:
9 - xkblayout-state
10 or
11 - setxkbmap
12
13 @author shadowprince
14 @license Eclipse Public License
15 """
16
17 from subprocess import check_output
18 from time import time
19 import shlex
20 import re
21
22 # colors of layouts, check your command's output to match keys
23 LANG_COLORS = {
24 'fr': '#268BD2', # solarized blue
25 'ru': '#F75252', # red
26 'ua': '#FCE94F', # yellow
27 'us': '#729FCF', # light blue
28 }
29
30 LAYOUT_RE = re.compile(r".*layout:\s*(\w+).*", flags=re.DOTALL)
31
32
33 def xbklayout():
34 """
35 check using xkblayout-state (preferred method)
36 """
37 return check_output(
38 ["xkblayout-state", "print", "%s"]
39 ).decode('utf-8')
40
41
42 def setxkbmap():
43 """
44 check using setxkbmap >= 1.3.0
45
46 Please read issue 33 for more information :
47 https://github.com/ultrabug/py3status/pull/33
48 """
49 out = check_output(shlex.split("setxkbmap -query")).decode("utf-8")
50
51 return re.match(LAYOUT_RE, out).group(1)
52
53
54 class Py3status:
55 """
56 """
57 # available configuration parameters
58 cache_timeout = 10
59 color = ''
60
61 def __init__(self):
62 """
63 find the best implementation to get the keyboard's layout
64 """
65 try:
66 xbklayout()
67 except:
68 self.command = setxkbmap
69 else:
70 self.command = xbklayout
71
72 def keyboard_layout(self, i3s_output_list, i3s_config):
73 response = {
74 'cached_until': time() + self.cache_timeout,
75 'full_text': ''
76 }
77
78 lang = self.command().strip()
79 lang_color = self.color if self.color else LANG_COLORS.get(lang)
80
81 response['full_text'] = lang or '??'
82 if lang_color:
83 response['color'] = lang_color
84
85 return response
86
87 if __name__ == "__main__":
88 """
89 Test this module by calling it directly.
90 """
91 from time import sleep
92 x = Py3status()
93 config = {
94 'color_good': '#00FF00',
95 'color_bad': '#FF0000',
96 }
97 while True:
98 print(x.keyboard_layout([], config))
99 sleep(1)
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/py3status/modules/keyboard_layout.py b/py3status/modules/keyboard_layout.py
--- a/py3status/modules/keyboard_layout.py
+++ b/py3status/modules/keyboard_layout.py
@@ -30,7 +30,7 @@
LAYOUT_RE = re.compile(r".*layout:\s*(\w+).*", flags=re.DOTALL)
-def xbklayout():
+def xkblayout():
"""
check using xkblayout-state (preferred method)
"""
@@ -63,11 +63,11 @@
find the best implementation to get the keyboard's layout
"""
try:
- xbklayout()
+ xkblayout()
except:
self.command = setxkbmap
else:
- self.command = xbklayout
+ self.command = xkblayout
def keyboard_layout(self, i3s_output_list, i3s_config):
response = {
|
{"golden_diff": "diff --git a/py3status/modules/keyboard_layout.py b/py3status/modules/keyboard_layout.py\n--- a/py3status/modules/keyboard_layout.py\n+++ b/py3status/modules/keyboard_layout.py\n@@ -30,7 +30,7 @@\n LAYOUT_RE = re.compile(r\".*layout:\\s*(\\w+).*\", flags=re.DOTALL)\n \n \n-def xbklayout():\n+def xkblayout():\n \"\"\"\n check using xkblayout-state (preferred method)\n \"\"\"\n@@ -63,11 +63,11 @@\n find the best implementation to get the keyboard's layout\n \"\"\"\n try:\n- xbklayout()\n+ xkblayout()\n except:\n self.command = setxkbmap\n else:\n- self.command = xbklayout\n+ self.command = xkblayout\n \n def keyboard_layout(self, i3s_output_list, i3s_config):\n response = {\n", "issue": "Typo in the keyboard_layout module\n`xbklayout` function should be `xkblayout`, i.e. \"kb\" instead of \"bk\". This typo appears 3 times in total.\n\nThe rest of the code uses \"kb\" so I assumed what I found was a typo and decided to report it since it already caught my eye.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDisplay the current keyboard layout.\n\nConfiguration parameters:\n - cache_timeout: check for keyboard layout change every seconds\n\nRequires:\n - xkblayout-state\n or\n - setxkbmap\n\n@author shadowprince\n@license Eclipse Public License\n\"\"\"\n\nfrom subprocess import check_output\nfrom time import time\nimport shlex\nimport re\n\n# colors of layouts, check your command's output to match keys\nLANG_COLORS = {\n 'fr': '#268BD2', # solarized blue\n 'ru': '#F75252', # red\n 'ua': '#FCE94F', # yellow\n 'us': '#729FCF', # light blue\n}\n\nLAYOUT_RE = re.compile(r\".*layout:\\s*(\\w+).*\", flags=re.DOTALL)\n\n\ndef xbklayout():\n \"\"\"\n check using xkblayout-state (preferred method)\n \"\"\"\n return check_output(\n [\"xkblayout-state\", \"print\", \"%s\"]\n ).decode('utf-8')\n\n\ndef setxkbmap():\n \"\"\"\n check using setxkbmap >= 1.3.0\n\n Please read issue 33 for more information :\n https://github.com/ultrabug/py3status/pull/33\n \"\"\"\n out = check_output(shlex.split(\"setxkbmap -query\")).decode(\"utf-8\")\n\n return re.match(LAYOUT_RE, out).group(1)\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n # available configuration parameters\n cache_timeout = 10\n color = ''\n\n def __init__(self):\n \"\"\"\n find the best implementation to get the keyboard's layout\n \"\"\"\n try:\n xbklayout()\n except:\n self.command = setxkbmap\n else:\n self.command = xbklayout\n\n def keyboard_layout(self, i3s_output_list, i3s_config):\n response = {\n 'cached_until': time() + self.cache_timeout,\n 'full_text': ''\n }\n\n lang = self.command().strip()\n lang_color = self.color if self.color else LANG_COLORS.get(lang)\n\n response['full_text'] = lang or '??'\n if lang_color:\n response['color'] = lang_color\n\n return response\n\nif __name__ == \"__main__\":\n \"\"\"\n Test this module by calling it directly.\n \"\"\"\n from time import sleep\n x = Py3status()\n config = {\n 'color_good': '#00FF00',\n 'color_bad': '#FF0000',\n }\n while True:\n print(x.keyboard_layout([], config))\n sleep(1)\n", "path": "py3status/modules/keyboard_layout.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDisplay the current keyboard layout.\n\nConfiguration parameters:\n - cache_timeout: check for keyboard layout change every seconds\n\nRequires:\n - xkblayout-state\n or\n - setxkbmap\n\n@author shadowprince\n@license Eclipse Public License\n\"\"\"\n\nfrom subprocess import check_output\nfrom time import time\nimport shlex\nimport re\n\n# colors of layouts, check 
your command's output to match keys\nLANG_COLORS = {\n 'fr': '#268BD2', # solarized blue\n 'ru': '#F75252', # red\n 'ua': '#FCE94F', # yellow\n 'us': '#729FCF', # light blue\n}\n\nLAYOUT_RE = re.compile(r\".*layout:\\s*(\\w+).*\", flags=re.DOTALL)\n\n\ndef xkblayout():\n \"\"\"\n check using xkblayout-state (preferred method)\n \"\"\"\n return check_output(\n [\"xkblayout-state\", \"print\", \"%s\"]\n ).decode('utf-8')\n\n\ndef setxkbmap():\n \"\"\"\n check using setxkbmap >= 1.3.0\n\n Please read issue 33 for more information :\n https://github.com/ultrabug/py3status/pull/33\n \"\"\"\n out = check_output(shlex.split(\"setxkbmap -query\")).decode(\"utf-8\")\n\n return re.match(LAYOUT_RE, out).group(1)\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n # available configuration parameters\n cache_timeout = 10\n color = ''\n\n def __init__(self):\n \"\"\"\n find the best implementation to get the keyboard's layout\n \"\"\"\n try:\n xkblayout()\n except:\n self.command = setxkbmap\n else:\n self.command = xkblayout\n\n def keyboard_layout(self, i3s_output_list, i3s_config):\n response = {\n 'cached_until': time() + self.cache_timeout,\n 'full_text': ''\n }\n\n lang = self.command().strip()\n lang_color = self.color if self.color else LANG_COLORS.get(lang)\n\n response['full_text'] = lang or '??'\n if lang_color:\n response['color'] = lang_color\n\n return response\n\nif __name__ == \"__main__\":\n \"\"\"\n Test this module by calling it directly.\n \"\"\"\n from time import sleep\n x = Py3status()\n config = {\n 'color_good': '#00FF00',\n 'color_bad': '#FF0000',\n }\n while True:\n print(x.keyboard_layout([], config))\n sleep(1)\n", "path": "py3status/modules/keyboard_layout.py"}]}
| 1,130 | 208 |
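For quick reference, a minimal sketch of the corrected preferred/fallback layout detection after the rename above; it condenses the patched module, with `except Exception` substituted for the module's bare `except` purely for illustration.

```python
import re
import shlex
from subprocess import check_output

LAYOUT_RE = re.compile(r".*layout:\s*(\w+).*", flags=re.DOTALL)

def xkblayout():
    # Preferred method: xkblayout-state prints the active layout directly.
    return check_output(["xkblayout-state", "print", "%s"]).decode("utf-8")

def setxkbmap():
    # Fallback: parse `setxkbmap -query` output (setxkbmap >= 1.3.0).
    out = check_output(shlex.split("setxkbmap -query")).decode("utf-8")
    return re.match(LAYOUT_RE, out).group(1)

try:
    xkblayout()            # probe the preferred backend once
    command = xkblayout
except Exception:
    command = setxkbmap    # fall back when xkblayout-state is unavailable
```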
gh_patches_debug_62154
|
rasdani/github-patches
|
git_diff
|
Parsl__parsl-258
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`globals` should be an optional config field
Running over a config without `globals`, I see:
```
def make_rundir(config=None, path=None):
"""When a path has not been specified, make the run directory.
Creates a rundir with the following hierarchy:
./runinfo <- Home of all run directories
|----000
|----001 <- Directories for each run
| ....
|----NNN
Kwargs:
- path (str): String path to a specific run dir
Default : None.
"""
try:
if not path:
path = None
> elif config["globals"].get('runDir', None):
E KeyError: 'globals'
../dataflow/rundirs.py:25: KeyError
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/dataflow/rundirs.py`
Content:
```
1 import os
2 from glob import glob
3 import logging
4
5 logger = logging.getLogger(__name__)
6
7
8 def make_rundir(config=None, path=None):
9 """When a path has not been specified, make the run directory.
10
11 Creates a rundir with the following hierarchy:
12 ./runinfo <- Home of all run directories
13 |----000
14 |----001 <- Directories for each run
15 | ....
16 |----NNN
17
18 Kwargs:
19 - path (str): String path to a specific run dir
20 Default : None.
21 """
22 try:
23 if not path:
24 path = None
25 elif config["globals"].get('runDir', None):
26 path = config["globals"]['runDir']
27
28 if not path:
29 path = "./runinfo"
30
31 if not os.path.exists(path):
32 os.makedirs(path)
33
34 prev_rundirs = glob(os.path.join(path, "[0-9]*"))
35
36 current_rundir = os.path.join(path, '000')
37
38 if prev_rundirs:
39 # Since we globbed on files named as 0-9
40 x = sorted([int(os.path.basename(x)) for x in prev_rundirs])[-1]
41 current_rundir = os.path.join(path, '{0:03}'.format(x + 1))
42
43 os.makedirs(current_rundir)
44 logger.debug("Parsl run initializing in rundir:{0}".format(current_rundir))
45 return os.path.abspath(current_rundir)
46
47 except Exception as e:
48 logger.error("Failed to create a run directory")
49 logger.error("Error: {0}".format(e))
50 exit(-1)
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsl/dataflow/rundirs.py b/parsl/dataflow/rundirs.py
--- a/parsl/dataflow/rundirs.py
+++ b/parsl/dataflow/rundirs.py
@@ -22,7 +22,7 @@
try:
if not path:
path = None
- elif config["globals"].get('runDir', None):
+ elif config.get("globals", {}).get('runDir'):
path = config["globals"]['runDir']
if not path:
|
{"golden_diff": "diff --git a/parsl/dataflow/rundirs.py b/parsl/dataflow/rundirs.py\n--- a/parsl/dataflow/rundirs.py\n+++ b/parsl/dataflow/rundirs.py\n@@ -22,7 +22,7 @@\n try:\n if not path:\n path = None\n- elif config[\"globals\"].get('runDir', None):\n+ elif config.get(\"globals\", {}).get('runDir'):\n path = config[\"globals\"]['runDir']\n \n if not path:\n", "issue": "`globals` should be an optional config field\nRunning over a config without `globals`, I see:\r\n```\r\n def make_rundir(config=None, path=None):\r\n \"\"\"When a path has not been specified, make the run directory.\r\n\r\n Creates a rundir with the following hierarchy:\r\n ./runinfo <- Home of all run directories\r\n |----000\r\n |----001 <- Directories for each run\r\n | ....\r\n |----NNN\r\n\r\n Kwargs:\r\n - path (str): String path to a specific run dir\r\n Default : None.\r\n \"\"\"\r\n try:\r\n if not path:\r\n path = None\r\n> elif config[\"globals\"].get('runDir', None):\r\nE KeyError: 'globals'\r\n\r\n../dataflow/rundirs.py:25: KeyError\r\n```\n", "before_files": [{"content": "import os\nfrom glob import glob\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef make_rundir(config=None, path=None):\n \"\"\"When a path has not been specified, make the run directory.\n\n Creates a rundir with the following hierarchy:\n ./runinfo <- Home of all run directories\n |----000\n |----001 <- Directories for each run\n | ....\n |----NNN\n\n Kwargs:\n - path (str): String path to a specific run dir\n Default : None.\n \"\"\"\n try:\n if not path:\n path = None\n elif config[\"globals\"].get('runDir', None):\n path = config[\"globals\"]['runDir']\n\n if not path:\n path = \"./runinfo\"\n\n if not os.path.exists(path):\n os.makedirs(path)\n\n prev_rundirs = glob(os.path.join(path, \"[0-9]*\"))\n\n current_rundir = os.path.join(path, '000')\n\n if prev_rundirs:\n # Since we globbed on files named as 0-9\n x = sorted([int(os.path.basename(x)) for x in prev_rundirs])[-1]\n current_rundir = os.path.join(path, '{0:03}'.format(x + 1))\n\n os.makedirs(current_rundir)\n logger.debug(\"Parsl run initializing in rundir:{0}\".format(current_rundir))\n return os.path.abspath(current_rundir)\n\n except Exception as e:\n logger.error(\"Failed to create a run directory\")\n logger.error(\"Error: {0}\".format(e))\n exit(-1)\n", "path": "parsl/dataflow/rundirs.py"}], "after_files": [{"content": "import os\nfrom glob import glob\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef make_rundir(config=None, path=None):\n \"\"\"When a path has not been specified, make the run directory.\n\n Creates a rundir with the following hierarchy:\n ./runinfo <- Home of all run directories\n |----000\n |----001 <- Directories for each run\n | ....\n |----NNN\n\n Kwargs:\n - path (str): String path to a specific run dir\n Default : None.\n \"\"\"\n try:\n if not path:\n path = None\n elif config.get(\"globals\", {}).get('runDir'):\n path = config[\"globals\"]['runDir']\n\n if not path:\n path = \"./runinfo\"\n\n if not os.path.exists(path):\n os.makedirs(path)\n\n prev_rundirs = glob(os.path.join(path, \"[0-9]*\"))\n\n current_rundir = os.path.join(path, '000')\n\n if prev_rundirs:\n # Since we globbed on files named as 0-9\n x = sorted([int(os.path.basename(x)) for x in prev_rundirs])[-1]\n current_rundir = os.path.join(path, '{0:03}'.format(x + 1))\n\n os.makedirs(current_rundir)\n logger.debug(\"Parsl run initializing in rundir:{0}\".format(current_rundir))\n return os.path.abspath(current_rundir)\n\n except Exception as e:\n 
logger.error(\"Failed to create a run directory\")\n logger.error(\"Error: {0}\".format(e))\n exit(-1)\n", "path": "parsl/dataflow/rundirs.py"}]}
| 898 | 118 |
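For quick reference, a minimal sketch of the defensive nested lookup used in the rundirs fix above; the helper name and sample configs are illustrative only, not taken from the Parsl codebase.

```python
def run_dir_from(config):
    # .get() with a {} default tolerates a missing "globals" section,
    # avoiding the KeyError reported in the issue.
    return config.get("globals", {}).get("runDir") or "./runinfo"

print(run_dir_from({}))                                    # ./runinfo
print(run_dir_from({"globals": {}}))                       # ./runinfo
print(run_dir_from({"globals": {"runDir": "/tmp/runs"}}))  # /tmp/runs
```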
gh_patches_debug_23081
|
rasdani/github-patches
|
git_diff
|
ManimCommunity__manim-521
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Issue Saving Log files
https://github.com/ManimCommunity/manim/blob/e011f640cf085879b67cce7bc0dc08450ba92d3b/manim/config/config.py#L158-L165
Here it defines that the scene name will be the log file name. The problem is that when I ran with no scene name and entered it at runtime, the log file was saved as `media/logs/.log`, which is weird and should not happen.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `manim/config/config.py`
Content:
```
1 """
2 config.py
3 ---------
4 Process the manim.cfg file and the command line arguments into a single
5 config object.
6 """
7
8
9 __all__ = ["file_writer_config", "config", "camera_config", "tempconfig"]
10
11
12 import os
13 import sys
14 from contextlib import contextmanager
15
16 import colour
17
18 from .. import constants
19 from .config_utils import (
20 _determine_quality,
21 _run_config,
22 _init_dirs,
23 _from_command_line,
24 )
25
26 from .logger import set_rich_logger, set_file_logger, logger
27 from ..utils.tex import TexTemplate, TexTemplateFromFile
28
29 __all__ = ["file_writer_config", "config", "camera_config", "tempconfig"]
30
31
32 config = None
33
34
35 @contextmanager
36 def tempconfig(temp):
37 """Context manager that temporarily modifies the global config dict.
38
39 The code block inside the ``with`` statement will use the modified config.
40 After the code block, the config will be restored to its original value.
41
42 Parameters
43 ----------
44
45 temp : :class:`dict`
46 A dictionary whose keys will be used to temporarily update the global
47 config.
48
49 Examples
50 --------
51 Use ``with tempconfig({...})`` to temporarily change the default values of
52 certain objects.
53
54 .. code_block:: python
55
56 c = Camera()
57 c.frame_width == config['frame_width'] # -> True
58 with tempconfig({'frame_width': 100}):
59 c = Camera()
60 c.frame_width == config['frame_width'] # -> False
61 c.frame_width == 100 # -> True
62
63 """
64 global config
65 original = config.copy()
66
67 temp = {k: v for k, v in temp.items() if k in original}
68
69 # In order to change the config that every module has acces to, use
70 # update(), DO NOT use assignment. Assigning config = some_dict will just
71 # make the local variable named config point to a new dictionary, it will
72 # NOT change the dictionary that every module has a reference to.
73 config.update(temp)
74 try:
75 yield
76 finally:
77 config.update(original) # update, not assignment!
78
79
80 def _parse_config(config_parser, args):
81 """Parse config files and CLI arguments into a single dictionary."""
82 # By default, use the CLI section of the digested .cfg files
83 default = config_parser["CLI"]
84
85 # Handle the *_quality flags. These determine the section to read
86 # and are stored in 'camera_config'. Note the highest resolution
87 # passed as argument will be used.
88 quality = _determine_quality(args)
89 section = config_parser[quality if quality != "production" else "CLI"]
90
91 # Loop over low quality for the keys, could be any quality really
92 config = {opt: section.getint(opt) for opt in config_parser["low_quality"]}
93
94 config["default_pixel_height"] = default.getint("pixel_height")
95 config["default_pixel_width"] = default.getint("pixel_width")
96 # The -r, --resolution flag overrides the *_quality flags
97 if args.resolution is not None:
98 if "," in args.resolution:
99 height_str, width_str = args.resolution.split(",")
100 height, width = int(height_str), int(width_str)
101 else:
102 height = int(args.resolution)
103 width = int(16 * height / 9)
104 config.update({"pixel_height": height, "pixel_width": width})
105
106 # Handle the -c (--background_color) flag
107 if args.background_color is not None:
108 try:
109 background_color = colour.Color(args.background_color)
110 except AttributeError as err:
111 logger.warning("Please use a valid color.")
112 logger.error(err)
113 sys.exit(2)
114 else:
115 background_color = colour.Color(default["background_color"])
116 config["background_color"] = background_color
117
118 config["use_js_renderer"] = args.use_js_renderer or default.getboolean(
119 "use_js_renderer"
120 )
121
122 config["js_renderer_path"] = args.js_renderer_path or default.get(
123 "js_renderer_path"
124 )
125
126 # Set the rest of the frame properties
127 config["frame_height"] = 8.0
128 config["frame_width"] = (
129 config["frame_height"] * config["pixel_width"] / config["pixel_height"]
130 )
131 config["frame_y_radius"] = config["frame_height"] / 2
132 config["frame_x_radius"] = config["frame_width"] / 2
133 config["top"] = config["frame_y_radius"] * constants.UP
134 config["bottom"] = config["frame_y_radius"] * constants.DOWN
135 config["left_side"] = config["frame_x_radius"] * constants.LEFT
136 config["right_side"] = config["frame_x_radius"] * constants.RIGHT
137
138 # Handle the --tex_template flag. Note we accept None if the flag is absent
139 tex_fn = os.path.expanduser(args.tex_template) if args.tex_template else None
140
141 if tex_fn is not None and not os.access(tex_fn, os.R_OK):
142 # custom template not available, fallback to default
143 logger.warning(
144 f"Custom TeX template {tex_fn} not found or not readable. "
145 "Falling back to the default template."
146 )
147 tex_fn = None
148 config["tex_template_file"] = tex_fn
149 config["tex_template"] = (
150 TexTemplateFromFile(filename=tex_fn) if tex_fn is not None else TexTemplate()
151 )
152
153 return config
154
155
156 args, config_parser, file_writer_config, successfully_read_files = _run_config()
157 logger.setLevel(file_writer_config["verbosity"])
158 set_rich_logger(config_parser["logger"], file_writer_config["verbosity"])
159
160 if _from_command_line():
161 logger.debug(
162 f"Read configuration files: {[os.path.abspath(cfgfile) for cfgfile in successfully_read_files]}"
163 )
164 if not (hasattr(args, "subcommands")):
165 _init_dirs(file_writer_config)
166 config = _parse_config(config_parser, args)
167 if config["use_js_renderer"]:
168 file_writer_config["disable_caching"] = True
169 camera_config = config
170
171 if file_writer_config["log_to_file"]:
172 # IMPORTANT note about file name : The log file name will be the scene_name get from the args (contained in file_writer_config). So it can differ from the real name of the scene.
173 log_file_path = os.path.join(
174 file_writer_config["log_dir"],
175 "".join(file_writer_config["scene_names"]) + ".log",
176 )
177 set_file_logger(log_file_path)
178 logger.info("Log file wil be saved in %(logpath)s", {"logpath": log_file_path})
179
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/manim/config/config.py b/manim/config/config.py
--- a/manim/config/config.py
+++ b/manim/config/config.py
@@ -156,10 +156,19 @@
# Set the different loggers
set_rich_logger(config_parser["logger"], file_writer_config["verbosity"])
if file_writer_config["log_to_file"]:
- # IMPORTANT note about file name : The log file name will be the scene_name get from the args (contained in file_writer_config). So it can differ from the real name of the scene.
+ # Note about log_file_name : The log file name will be the <name_of_animation_file>_<name_of_scene>.log
+ # get from the args (contained in file_writer_config). So it can differ from the real name of the scene.
+ # <name_of_scene> would only appear if scene name was provided on manim call
+ scene_name_suffix = "".join(file_writer_config["scene_names"])
+ scene_file_name = os.path.basename(args.file).split(".")[0]
+ log_file_name = (
+ f"{scene_file_name}_{scene_name_suffix}.log"
+ if scene_name_suffix
+ else f"{scene_file_name}.log"
+ )
log_file_path = os.path.join(
file_writer_config["log_dir"],
- "".join(file_writer_config["scene_names"]) + ".log",
+ log_file_name,
)
set_file_logger(log_file_path)
- logger.info("Log file wil be saved in %(logpath)s", {"logpath": log_file_path})
+ logger.info("Log file will be saved in %(logpath)s", {"logpath": log_file_path})
|
{"golden_diff": "diff --git a/manim/config/config.py b/manim/config/config.py\n--- a/manim/config/config.py\n+++ b/manim/config/config.py\n@@ -156,10 +156,19 @@\n # Set the different loggers\n set_rich_logger(config_parser[\"logger\"], file_writer_config[\"verbosity\"])\n if file_writer_config[\"log_to_file\"]:\n- # IMPORTANT note about file name : The log file name will be the scene_name get from the args (contained in file_writer_config). So it can differ from the real name of the scene.\n+ # Note about log_file_name : The log file name will be the <name_of_animation_file>_<name_of_scene>.log\n+ # get from the args (contained in file_writer_config). So it can differ from the real name of the scene.\n+ # <name_of_scene> would only appear if scene name was provided on manim call\n+ scene_name_suffix = \"\".join(file_writer_config[\"scene_names\"])\n+ scene_file_name = os.path.basename(args.file).split(\".\")[0]\n+ log_file_name = (\n+ f\"{scene_file_name}_{scene_name_suffix}.log\"\n+ if scene_name_suffix\n+ else f\"{scene_file_name}.log\"\n+ )\n log_file_path = os.path.join(\n file_writer_config[\"log_dir\"],\n- \"\".join(file_writer_config[\"scene_names\"]) + \".log\",\n+ log_file_name,\n )\n set_file_logger(log_file_path)\n- logger.info(\"Log file wil be saved in %(logpath)s\", {\"logpath\": log_file_path})\n+ logger.info(\"Log file will be saved in %(logpath)s\", {\"logpath\": log_file_path})\n", "issue": "Issue Saving Log files\nhttps://github.com/ManimCommunity/manim/blob/e011f640cf085879b67cce7bc0dc08450ba92d3b/manim/config/config.py#L158-L165\r\nHere it defines that scene name will be log file name. But a problem is when I tried with no scene name and entered it at runtime. The log file a saved in `media/logs/.log` which is weird and should not happen.\n", "before_files": [{"content": "\"\"\"\nconfig.py\n---------\nProcess the manim.cfg file and the command line arguments into a single\nconfig object.\n\"\"\"\n\n\n__all__ = [\"file_writer_config\", \"config\", \"camera_config\", \"tempconfig\"]\n\n\nimport os\nimport sys\nfrom contextlib import contextmanager\n\nimport colour\n\nfrom .. import constants\nfrom .config_utils import (\n _determine_quality,\n _run_config,\n _init_dirs,\n _from_command_line,\n)\n\nfrom .logger import set_rich_logger, set_file_logger, logger\nfrom ..utils.tex import TexTemplate, TexTemplateFromFile\n\n__all__ = [\"file_writer_config\", \"config\", \"camera_config\", \"tempconfig\"]\n\n\nconfig = None\n\n\n@contextmanager\ndef tempconfig(temp):\n \"\"\"Context manager that temporarily modifies the global config dict.\n\n The code block inside the ``with`` statement will use the modified config.\n After the code block, the config will be restored to its original value.\n\n Parameters\n ----------\n\n temp : :class:`dict`\n A dictionary whose keys will be used to temporarily update the global\n config.\n\n Examples\n --------\n Use ``with tempconfig({...})`` to temporarily change the default values of\n certain objects.\n\n .. code_block:: python\n\n c = Camera()\n c.frame_width == config['frame_width'] # -> True\n with tempconfig({'frame_width': 100}):\n c = Camera()\n c.frame_width == config['frame_width'] # -> False\n c.frame_width == 100 # -> True\n\n \"\"\"\n global config\n original = config.copy()\n\n temp = {k: v for k, v in temp.items() if k in original}\n\n # In order to change the config that every module has acces to, use\n # update(), DO NOT use assignment. 
Assigning config = some_dict will just\n # make the local variable named config point to a new dictionary, it will\n # NOT change the dictionary that every module has a reference to.\n config.update(temp)\n try:\n yield\n finally:\n config.update(original) # update, not assignment!\n\n\ndef _parse_config(config_parser, args):\n \"\"\"Parse config files and CLI arguments into a single dictionary.\"\"\"\n # By default, use the CLI section of the digested .cfg files\n default = config_parser[\"CLI\"]\n\n # Handle the *_quality flags. These determine the section to read\n # and are stored in 'camera_config'. Note the highest resolution\n # passed as argument will be used.\n quality = _determine_quality(args)\n section = config_parser[quality if quality != \"production\" else \"CLI\"]\n\n # Loop over low quality for the keys, could be any quality really\n config = {opt: section.getint(opt) for opt in config_parser[\"low_quality\"]}\n\n config[\"default_pixel_height\"] = default.getint(\"pixel_height\")\n config[\"default_pixel_width\"] = default.getint(\"pixel_width\")\n # The -r, --resolution flag overrides the *_quality flags\n if args.resolution is not None:\n if \",\" in args.resolution:\n height_str, width_str = args.resolution.split(\",\")\n height, width = int(height_str), int(width_str)\n else:\n height = int(args.resolution)\n width = int(16 * height / 9)\n config.update({\"pixel_height\": height, \"pixel_width\": width})\n\n # Handle the -c (--background_color) flag\n if args.background_color is not None:\n try:\n background_color = colour.Color(args.background_color)\n except AttributeError as err:\n logger.warning(\"Please use a valid color.\")\n logger.error(err)\n sys.exit(2)\n else:\n background_color = colour.Color(default[\"background_color\"])\n config[\"background_color\"] = background_color\n\n config[\"use_js_renderer\"] = args.use_js_renderer or default.getboolean(\n \"use_js_renderer\"\n )\n\n config[\"js_renderer_path\"] = args.js_renderer_path or default.get(\n \"js_renderer_path\"\n )\n\n # Set the rest of the frame properties\n config[\"frame_height\"] = 8.0\n config[\"frame_width\"] = (\n config[\"frame_height\"] * config[\"pixel_width\"] / config[\"pixel_height\"]\n )\n config[\"frame_y_radius\"] = config[\"frame_height\"] / 2\n config[\"frame_x_radius\"] = config[\"frame_width\"] / 2\n config[\"top\"] = config[\"frame_y_radius\"] * constants.UP\n config[\"bottom\"] = config[\"frame_y_radius\"] * constants.DOWN\n config[\"left_side\"] = config[\"frame_x_radius\"] * constants.LEFT\n config[\"right_side\"] = config[\"frame_x_radius\"] * constants.RIGHT\n\n # Handle the --tex_template flag. Note we accept None if the flag is absent\n tex_fn = os.path.expanduser(args.tex_template) if args.tex_template else None\n\n if tex_fn is not None and not os.access(tex_fn, os.R_OK):\n # custom template not available, fallback to default\n logger.warning(\n f\"Custom TeX template {tex_fn} not found or not readable. 
\"\n \"Falling back to the default template.\"\n )\n tex_fn = None\n config[\"tex_template_file\"] = tex_fn\n config[\"tex_template\"] = (\n TexTemplateFromFile(filename=tex_fn) if tex_fn is not None else TexTemplate()\n )\n\n return config\n\n\nargs, config_parser, file_writer_config, successfully_read_files = _run_config()\nlogger.setLevel(file_writer_config[\"verbosity\"])\nset_rich_logger(config_parser[\"logger\"], file_writer_config[\"verbosity\"])\n\nif _from_command_line():\n logger.debug(\n f\"Read configuration files: {[os.path.abspath(cfgfile) for cfgfile in successfully_read_files]}\"\n )\n if not (hasattr(args, \"subcommands\")):\n _init_dirs(file_writer_config)\nconfig = _parse_config(config_parser, args)\nif config[\"use_js_renderer\"]:\n file_writer_config[\"disable_caching\"] = True\ncamera_config = config\n\nif file_writer_config[\"log_to_file\"]:\n # IMPORTANT note about file name : The log file name will be the scene_name get from the args (contained in file_writer_config). So it can differ from the real name of the scene.\n log_file_path = os.path.join(\n file_writer_config[\"log_dir\"],\n \"\".join(file_writer_config[\"scene_names\"]) + \".log\",\n )\n set_file_logger(log_file_path)\n logger.info(\"Log file wil be saved in %(logpath)s\", {\"logpath\": log_file_path})\n", "path": "manim/config/config.py"}], "after_files": [{"content": "\"\"\"\nconfig.py\n---------\nProcess the manim.cfg file and the command line arguments into a single\nconfig object.\n\"\"\"\n\n\n__all__ = [\"file_writer_config\", \"config\", \"camera_config\", \"tempconfig\"]\n\n\nimport os\nimport sys\nfrom contextlib import contextmanager\n\nimport colour\n\nfrom .. import constants\nfrom .config_utils import _run_config, _init_dirs, _from_command_line\n\nfrom .logger import set_rich_logger, set_file_logger, logger\nfrom ..utils.tex import TexTemplate, TexTemplateFromFile\n\n__all__ = [\"file_writer_config\", \"config\", \"camera_config\", \"tempconfig\"]\n\n\nconfig = None\n\n\n@contextmanager\ndef tempconfig(temp):\n \"\"\"Context manager that temporarily modifies the global config dict.\n\n The code block inside the ``with`` statement will use the modified config.\n After the code block, the config will be restored to its original value.\n\n Parameters\n ----------\n\n temp : :class:`dict`\n A dictionary whose keys will be used to temporarily update the global\n config.\n\n Examples\n --------\n Use ``with tempconfig({...})`` to temporarily change the default values of\n certain objects.\n\n .. code_block:: python\n\n c = Camera()\n c.frame_width == config['frame_width'] # -> True\n with tempconfig({'frame_width': 100}):\n c = Camera()\n c.frame_width == config['frame_width'] # -> False\n c.frame_width == 100 # -> True\n\n \"\"\"\n global config\n original = config.copy()\n\n temp = {k: v for k, v in temp.items() if k in original}\n\n # In order to change the config that every module has acces to, use\n # update(), DO NOT use assignment. Assigning config = some_dict will just\n # make the local variable named config point to a new dictionary, it will\n # NOT change the dictionary that every module has a reference to.\n config.update(temp)\n try:\n yield\n finally:\n config.update(original) # update, not assignment!\n\n\ndef _parse_config(config_parser, args):\n \"\"\"Parse config files and CLI arguments into a single dictionary.\"\"\"\n # By default, use the CLI section of the digested .cfg files\n default = config_parser[\"CLI\"]\n\n # Handle the *_quality flags. 
These determine the section to read\n # and are stored in 'camera_config'. Note the highest resolution\n # passed as argument will be used.\n for flag in [\"fourk_quality\", \"high_quality\", \"medium_quality\", \"low_quality\"]:\n if getattr(args, flag):\n section = config_parser[flag]\n break\n else:\n section = config_parser[\"CLI\"]\n config = {opt: section.getint(opt) for opt in config_parser[flag]}\n\n config[\"default_pixel_height\"] = default.getint(\"pixel_height\")\n config[\"default_pixel_width\"] = default.getint(\"pixel_width\")\n # The -r, --resolution flag overrides the *_quality flags\n if args.resolution is not None:\n if \",\" in args.resolution:\n height_str, width_str = args.resolution.split(\",\")\n height, width = int(height_str), int(width_str)\n else:\n height = int(args.resolution)\n width = int(16 * height / 9)\n config.update({\"pixel_height\": height, \"pixel_width\": width})\n\n # Handle the -c (--background_color) flag\n if args.background_color is not None:\n try:\n background_color = colour.Color(args.background_color)\n except AttributeError as err:\n logger.warning(\"Please use a valid color.\")\n logger.error(err)\n sys.exit(2)\n else:\n background_color = colour.Color(default[\"background_color\"])\n config[\"background_color\"] = background_color\n\n # Set the rest of the frame properties\n config[\"frame_height\"] = 8.0\n config[\"frame_width\"] = (\n config[\"frame_height\"] * config[\"pixel_width\"] / config[\"pixel_height\"]\n )\n config[\"frame_y_radius\"] = config[\"frame_height\"] / 2\n config[\"frame_x_radius\"] = config[\"frame_width\"] / 2\n config[\"top\"] = config[\"frame_y_radius\"] * constants.UP\n config[\"bottom\"] = config[\"frame_y_radius\"] * constants.DOWN\n config[\"left_side\"] = config[\"frame_x_radius\"] * constants.LEFT\n config[\"right_side\"] = config[\"frame_x_radius\"] * constants.RIGHT\n\n # Handle the --tex_template flag. Note we accept None if the flag is absent\n tex_fn = os.path.expanduser(args.tex_template) if args.tex_template else None\n\n if tex_fn is not None and not os.access(tex_fn, os.R_OK):\n # custom template not available, fallback to default\n logger.warning(\n f\"Custom TeX template {tex_fn} not found or not readable. \"\n \"Falling back to the default template.\"\n )\n tex_fn = None\n config[\"tex_template_file\"] = tex_fn\n config[\"tex_template\"] = (\n TexTemplateFromFile(filename=tex_fn) if tex_fn is not None else TexTemplate()\n )\n\n return config\n\n\nargs, config_parser, file_writer_config, successfully_read_files = _run_config()\nlogger.setLevel(file_writer_config[\"verbosity\"])\nif _from_command_line():\n logger.debug(\n f\"Read configuration files: {[os.path.abspath(cfgfile) for cfgfile in successfully_read_files]}\"\n )\n if not (hasattr(args, \"subcommands\")):\n _init_dirs(file_writer_config)\nconfig = _parse_config(config_parser, args)\ncamera_config = config\n\n# Set the different loggers\nset_rich_logger(config_parser[\"logger\"], file_writer_config[\"verbosity\"])\nif file_writer_config[\"log_to_file\"]:\n # Note about log_file_name : The log file name will be the <name_of_animation_file>_<name_of_scene>.log\n # get from the args (contained in file_writer_config). 
So it can differ from the real name of the scene.\n # <name_of_scene> would only appear if scene name was provided on manim call\n scene_name_suffix = \"\".join(file_writer_config[\"scene_names\"])\n scene_file_name = os.path.basename(args.file).split(\".\")[0]\n log_file_name = (\n f\"{scene_file_name}_{scene_name_suffix}.log\"\n if scene_name_suffix\n else f\"{scene_file_name}.log\"\n )\n log_file_path = os.path.join(\n file_writer_config[\"log_dir\"],\n log_file_name,\n )\n set_file_logger(log_file_path)\n logger.info(\"Log file will be saved in %(logpath)s\", {\"logpath\": log_file_path})\n", "path": "manim/config/config.py"}]}
| 2,245 | 366 |
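For quick reference, a minimal sketch of the log-file naming rule introduced by the fix above; the helper name and sample inputs are illustrative only, not taken from the manim codebase.

```python
import os

def log_file_name(animation_file, scene_names):
    # <name_of_animation_file>_<name_of_scene>.log, with the scene suffix
    # dropped when no scene name was passed on the command line.
    scene_suffix = "".join(scene_names)
    base = os.path.basename(animation_file).split(".")[0]
    return f"{base}_{scene_suffix}.log" if scene_suffix else f"{base}.log"

print(log_file_name("project/scenes.py", ["SquareToCircle"]))  # scenes_SquareToCircle.log
print(log_file_name("project/scenes.py", []))                  # scenes.log (no more bare ".log")
```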
gh_patches_debug_9158
|
rasdani/github-patches
|
git_diff
|
boto__boto-2029
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
LaunchConfiguration does not retrieve AssociatePublicIpAddress properly
Pull request #1799 added support for `AssociatePublicIpAddress`, but only for sending that parameter; retrieval is not fully supported yet.
A simple fix would be grabbing what pull request #1832 implemented: https://github.com/boto/boto/pull/1832/files#diff-8c9af36969b22e4d4bb34924adc35399R105
My launch configuration object:
```
>>> launch_config.AssociatePublicIpAddress
u'true'
>>> launch_config.__class__
<class 'boto.ec2.autoscale.launchconfig.LaunchConfiguration'>
>>> pprint(dir(launch_config))
['AssociatePublicIpAddress',
'__class__',
'__delattr__',
'__dict__',
'__doc__',
'__format__',
'__getattribute__',
'__hash__',
'__init__',
'__module__',
'__new__',
'__reduce__',
'__reduce_ex__',
'__repr__',
'__setattr__',
'__sizeof__',
'__str__',
'__subclasshook__',
'__weakref__',
'associate_public_ip_address',
'block_device_mappings',
'connection',
'created_time',
'delete',
'delete_on_termination',
'ebs_optimized',
'endElement',
'image_id',
'instance_monitoring',
'instance_profile_name',
'instance_type',
'iops',
'kernel_id',
'key_name',
'launch_configuration_arn',
'member',
'name',
'ramdisk_id',
'security_groups',
'spot_price',
'startElement',
'user_data',
'volume_type']
```
I am using boto version 2.23.0.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `boto/ec2/autoscale/launchconfig.py`
Content:
```
1 # Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
2 # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining a
5 # copy of this software and associated documentation files (the
6 # "Software"), to deal in the Software without restriction, including
7 # without limitation the rights to use, copy, modify, merge, publish, dis-
8 # tribute, sublicense, and/or sell copies of the Software, and to permit
9 # persons to whom the Software is furnished to do so, subject to the fol-
10 # lowing conditions:
11 #
12 # The above copyright notice and this permission notice shall be included
13 # in all copies or substantial portions of the Software.
14 #
15 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
17 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
18 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
19 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 # IN THE SOFTWARE.
22
23 from datetime import datetime
24 from boto.resultset import ResultSet
25 from boto.ec2.elb.listelement import ListElement
26 import boto.utils
27 import base64
28
29 # this should use the corresponding object from boto.ec2
30
31
32 class Ebs(object):
33 def __init__(self, connection=None, snapshot_id=None, volume_size=None):
34 self.connection = connection
35 self.snapshot_id = snapshot_id
36 self.volume_size = volume_size
37
38 def __repr__(self):
39 return 'Ebs(%s, %s)' % (self.snapshot_id, self.volume_size)
40
41 def startElement(self, name, attrs, connection):
42 pass
43
44 def endElement(self, name, value, connection):
45 if name == 'SnapshotId':
46 self.snapshot_id = value
47 elif name == 'VolumeSize':
48 self.volume_size = value
49
50
51 class InstanceMonitoring(object):
52 def __init__(self, connection=None, enabled='false'):
53 self.connection = connection
54 self.enabled = enabled
55
56 def __repr__(self):
57 return 'InstanceMonitoring(%s)' % self.enabled
58
59 def startElement(self, name, attrs, connection):
60 pass
61
62 def endElement(self, name, value, connection):
63 if name == 'Enabled':
64 self.enabled = value
65
66
67 # this should use the BlockDeviceMapping from boto.ec2.blockdevicemapping
68 class BlockDeviceMapping(object):
69 def __init__(self, connection=None, device_name=None, virtual_name=None,
70 ebs=None, no_device=None):
71 self.connection = connection
72 self.device_name = device_name
73 self.virtual_name = virtual_name
74 self.ebs = ebs
75 self.no_device = no_device
76
77 def __repr__(self):
78 return 'BlockDeviceMapping(%s, %s)' % (self.device_name,
79 self.virtual_name)
80
81 def startElement(self, name, attrs, connection):
82 if name == 'Ebs':
83 self.ebs = Ebs(self)
84 return self.ebs
85
86 def endElement(self, name, value, connection):
87 if name == 'DeviceName':
88 self.device_name = value
89 elif name == 'VirtualName':
90 self.virtual_name = value
91 elif name == 'NoDevice':
92 self.no_device = bool(value)
93
94
95 class LaunchConfiguration(object):
96 def __init__(self, connection=None, name=None, image_id=None,
97 key_name=None, security_groups=None, user_data=None,
98 instance_type='m1.small', kernel_id=None,
99 ramdisk_id=None, block_device_mappings=None,
100 instance_monitoring=False, spot_price=None,
101 instance_profile_name=None, ebs_optimized=False,
102 associate_public_ip_address=None, volume_type=None,
103 delete_on_termination=True, iops=None):
104 """
105 A launch configuration.
106
107 :type name: str
108 :param name: Name of the launch configuration to create.
109
110 :type image_id: str
111 :param image_id: Unique ID of the Amazon Machine Image (AMI) which was
112 assigned during registration.
113
114 :type key_name: str
115 :param key_name: The name of the EC2 key pair.
116
117 :type security_groups: list
118 :param security_groups: Names or security group id's of the security
119 groups with which to associate the EC2 instances or VPC instances,
120 respectively.
121
122 :type user_data: str
123 :param user_data: The user data available to launched EC2 instances.
124
125 :type instance_type: str
126 :param instance_type: The instance type
127
128 :type kern_id: str
129 :param kern_id: Kernel id for instance
130
131 :type ramdisk_id: str
132 :param ramdisk_id: RAM disk id for instance
133
134 :type block_device_mappings: list
135 :param block_device_mappings: Specifies how block devices are exposed
136 for instances
137
138 :type instance_monitoring: bool
139 :param instance_monitoring: Whether instances in group are launched
140 with detailed monitoring.
141
142 :type spot_price: float
143 :param spot_price: The spot price you are bidding. Only applies
144 if you are building an autoscaling group with spot instances.
145
146 :type instance_profile_name: string
147 :param instance_profile_name: The name or the Amazon Resource
148 Name (ARN) of the instance profile associated with the IAM
149 role for the instance.
150
151 :type ebs_optimized: bool
152 :param ebs_optimized: Specifies whether the instance is optimized
153 for EBS I/O (true) or not (false).
154
155 :type associate_public_ip_address: bool
156 :param associate_public_ip_address: Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud.
157 Specifies whether to assign a public IP address to each instance launched in a Amazon VPC.
158 """
159 self.connection = connection
160 self.name = name
161 self.instance_type = instance_type
162 self.block_device_mappings = block_device_mappings
163 self.key_name = key_name
164 sec_groups = security_groups or []
165 self.security_groups = ListElement(sec_groups)
166 self.image_id = image_id
167 self.ramdisk_id = ramdisk_id
168 self.created_time = None
169 self.kernel_id = kernel_id
170 self.user_data = user_data
171 self.created_time = None
172 self.instance_monitoring = instance_monitoring
173 self.spot_price = spot_price
174 self.instance_profile_name = instance_profile_name
175 self.launch_configuration_arn = None
176 self.ebs_optimized = ebs_optimized
177 self.associate_public_ip_address = associate_public_ip_address
178 self.volume_type = volume_type
179 self.delete_on_termination = delete_on_termination
180 self.iops = iops
181
182 def __repr__(self):
183 return 'LaunchConfiguration:%s' % self.name
184
185 def startElement(self, name, attrs, connection):
186 if name == 'SecurityGroups':
187 return self.security_groups
188 elif name == 'BlockDeviceMappings':
189 self.block_device_mappings = ResultSet([('member',
190 BlockDeviceMapping)])
191 return self.block_device_mappings
192 elif name == 'InstanceMonitoring':
193 self.instance_monitoring = InstanceMonitoring(self)
194 return self.instance_monitoring
195
196 def endElement(self, name, value, connection):
197 if name == 'InstanceType':
198 self.instance_type = value
199 elif name == 'LaunchConfigurationName':
200 self.name = value
201 elif name == 'KeyName':
202 self.key_name = value
203 elif name == 'ImageId':
204 self.image_id = value
205 elif name == 'CreatedTime':
206 self.created_time = boto.utils.parse_ts(value)
207 elif name == 'KernelId':
208 self.kernel_id = value
209 elif name == 'RamdiskId':
210 self.ramdisk_id = value
211 elif name == 'UserData':
212 try:
213 self.user_data = base64.b64decode(value)
214 except TypeError:
215 self.user_data = value
216 elif name == 'LaunchConfigurationARN':
217 self.launch_configuration_arn = value
218 elif name == 'InstanceMonitoring':
219 self.instance_monitoring = value
220 elif name == 'SpotPrice':
221 self.spot_price = float(value)
222 elif name == 'IamInstanceProfile':
223 self.instance_profile_name = value
224 elif name == 'EbsOptimized':
225 self.ebs_optimized = True if value.lower() == 'true' else False
226 elif name == 'VolumeType':
227 self.volume_type = value
228 elif name == 'DeleteOnTermination':
229 if value.lower() == 'true':
230 self.delete_on_termination = True
231 else:
232 self.delete_on_termination = False
233 elif name == 'Iops':
234 self.iops = int(value)
235 else:
236 setattr(self, name, value)
237
238 def delete(self):
239 """ Delete this launch configuration. """
240 return self.connection.delete_launch_configuration(self.name)
241
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/boto/ec2/autoscale/launchconfig.py b/boto/ec2/autoscale/launchconfig.py
--- a/boto/ec2/autoscale/launchconfig.py
+++ b/boto/ec2/autoscale/launchconfig.py
@@ -223,6 +223,8 @@
self.instance_profile_name = value
elif name == 'EbsOptimized':
self.ebs_optimized = True if value.lower() == 'true' else False
+ elif name == 'AssociatePublicIpAddress':
+ self.associate_public_ip_address = True if value.lower() == 'true' else False
elif name == 'VolumeType':
self.volume_type = value
elif name == 'DeleteOnTermination':
|
{"golden_diff": "diff --git a/boto/ec2/autoscale/launchconfig.py b/boto/ec2/autoscale/launchconfig.py\n--- a/boto/ec2/autoscale/launchconfig.py\n+++ b/boto/ec2/autoscale/launchconfig.py\n@@ -223,6 +223,8 @@\n self.instance_profile_name = value\n elif name == 'EbsOptimized':\n self.ebs_optimized = True if value.lower() == 'true' else False\n+ elif name == 'AssociatePublicIpAddress':\n+ self.associate_public_ip_address = True if value.lower() == 'true' else False\n elif name == 'VolumeType':\n self.volume_type = value\n elif name == 'DeleteOnTermination':\n", "issue": "LaunchConfiguration does not retrieve AssociatePublicIpAddress properly\nPull request #1799 added support to `AssociatePublicIpAddress`, but it added only support for sending that parameter. Retrieval is not fully supported yet.\n\nA simple fix would be grabbing what pull request #1832 implemented: https://github.com/boto/boto/pull/1832/files#diff-8c9af36969b22e4d4bb34924adc35399R105\n\nMy launch configuration object:\n\n```\n>>> launch_config.AssociatePublicIpAddress\nu'true'\n\n>>> launch_config.__class__\n<class 'boto.ec2.autoscale.launchconfig.LaunchConfiguration'>\n>>> pprint(dir(launch_config))\n['AssociatePublicIpAddress',\n '__class__',\n '__delattr__',\n '__dict__',\n '__doc__',\n '__format__',\n '__getattribute__',\n '__hash__',\n '__init__',\n '__module__',\n '__new__',\n '__reduce__',\n '__reduce_ex__',\n '__repr__',\n '__setattr__',\n '__sizeof__',\n '__str__',\n '__subclasshook__',\n '__weakref__',\n 'associate_public_ip_address',\n 'block_device_mappings',\n 'connection',\n 'created_time',\n 'delete',\n 'delete_on_termination',\n 'ebs_optimized',\n 'endElement',\n 'image_id',\n 'instance_monitoring',\n 'instance_profile_name',\n 'instance_type',\n 'iops',\n 'kernel_id',\n 'key_name',\n 'launch_configuration_arn',\n 'member',\n 'name',\n 'ramdisk_id',\n 'security_groups',\n 'spot_price',\n 'startElement',\n 'user_data',\n 'volume_type']\n```\n\nI am using boto version 2.23.0.\n\n", "before_files": [{"content": "# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/\n# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\nfrom datetime import datetime\nfrom boto.resultset import ResultSet\nfrom boto.ec2.elb.listelement import ListElement\nimport boto.utils\nimport base64\n\n# this should use the corresponding object from boto.ec2\n\n\nclass Ebs(object):\n def __init__(self, connection=None, snapshot_id=None, volume_size=None):\n self.connection = connection\n self.snapshot_id = snapshot_id\n self.volume_size = volume_size\n\n def __repr__(self):\n return 'Ebs(%s, %s)' % (self.snapshot_id, self.volume_size)\n\n def startElement(self, name, attrs, connection):\n pass\n\n def endElement(self, name, value, connection):\n if name == 'SnapshotId':\n self.snapshot_id = value\n elif name == 'VolumeSize':\n self.volume_size = value\n\n\nclass InstanceMonitoring(object):\n def __init__(self, connection=None, enabled='false'):\n self.connection = connection\n self.enabled = enabled\n\n def __repr__(self):\n return 'InstanceMonitoring(%s)' % self.enabled\n\n def startElement(self, name, attrs, connection):\n pass\n\n def endElement(self, name, value, connection):\n if name == 'Enabled':\n self.enabled = value\n\n\n# this should use the BlockDeviceMapping from boto.ec2.blockdevicemapping\nclass BlockDeviceMapping(object):\n def __init__(self, connection=None, device_name=None, virtual_name=None,\n ebs=None, no_device=None):\n self.connection = connection\n self.device_name = device_name\n self.virtual_name = virtual_name\n self.ebs = ebs\n self.no_device = no_device\n\n def __repr__(self):\n return 'BlockDeviceMapping(%s, %s)' % (self.device_name,\n self.virtual_name)\n\n def startElement(self, name, attrs, connection):\n if name == 'Ebs':\n self.ebs = Ebs(self)\n return self.ebs\n\n def endElement(self, name, value, connection):\n if name == 'DeviceName':\n self.device_name = value\n elif name == 'VirtualName':\n self.virtual_name = value\n elif name == 'NoDevice':\n self.no_device = bool(value)\n\n\nclass LaunchConfiguration(object):\n def __init__(self, connection=None, name=None, image_id=None,\n key_name=None, security_groups=None, user_data=None,\n instance_type='m1.small', kernel_id=None,\n ramdisk_id=None, block_device_mappings=None,\n instance_monitoring=False, spot_price=None,\n instance_profile_name=None, ebs_optimized=False,\n associate_public_ip_address=None, volume_type=None,\n delete_on_termination=True, iops=None):\n \"\"\"\n A launch configuration.\n\n :type name: str\n :param name: Name of the launch configuration to create.\n\n :type image_id: str\n :param image_id: Unique ID of the Amazon Machine Image (AMI) which was\n assigned during registration.\n\n :type key_name: str\n :param key_name: The name of the EC2 key pair.\n\n :type security_groups: list\n :param security_groups: Names or security group id's of the security\n groups with which to associate the EC2 instances or VPC instances,\n respectively.\n\n :type user_data: str\n :param user_data: The user data available to launched EC2 instances.\n\n :type instance_type: str\n :param instance_type: The instance type\n\n :type kern_id: str\n :param kern_id: Kernel id for instance\n\n :type ramdisk_id: str\n :param ramdisk_id: RAM disk id for instance\n\n :type block_device_mappings: list\n :param block_device_mappings: Specifies how block devices are exposed\n for instances\n\n :type 
instance_monitoring: bool\n :param instance_monitoring: Whether instances in group are launched\n with detailed monitoring.\n\n :type spot_price: float\n :param spot_price: The spot price you are bidding. Only applies\n if you are building an autoscaling group with spot instances.\n\n :type instance_profile_name: string\n :param instance_profile_name: The name or the Amazon Resource\n Name (ARN) of the instance profile associated with the IAM\n role for the instance.\n\n :type ebs_optimized: bool\n :param ebs_optimized: Specifies whether the instance is optimized\n for EBS I/O (true) or not (false).\n\n :type associate_public_ip_address: bool\n :param associate_public_ip_address: Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud.\n Specifies whether to assign a public IP address to each instance launched in a Amazon VPC.\n \"\"\"\n self.connection = connection\n self.name = name\n self.instance_type = instance_type\n self.block_device_mappings = block_device_mappings\n self.key_name = key_name\n sec_groups = security_groups or []\n self.security_groups = ListElement(sec_groups)\n self.image_id = image_id\n self.ramdisk_id = ramdisk_id\n self.created_time = None\n self.kernel_id = kernel_id\n self.user_data = user_data\n self.created_time = None\n self.instance_monitoring = instance_monitoring\n self.spot_price = spot_price\n self.instance_profile_name = instance_profile_name\n self.launch_configuration_arn = None\n self.ebs_optimized = ebs_optimized\n self.associate_public_ip_address = associate_public_ip_address\n self.volume_type = volume_type\n self.delete_on_termination = delete_on_termination\n self.iops = iops\n\n def __repr__(self):\n return 'LaunchConfiguration:%s' % self.name\n\n def startElement(self, name, attrs, connection):\n if name == 'SecurityGroups':\n return self.security_groups\n elif name == 'BlockDeviceMappings':\n self.block_device_mappings = ResultSet([('member',\n BlockDeviceMapping)])\n return self.block_device_mappings\n elif name == 'InstanceMonitoring':\n self.instance_monitoring = InstanceMonitoring(self)\n return self.instance_monitoring\n\n def endElement(self, name, value, connection):\n if name == 'InstanceType':\n self.instance_type = value\n elif name == 'LaunchConfigurationName':\n self.name = value\n elif name == 'KeyName':\n self.key_name = value\n elif name == 'ImageId':\n self.image_id = value\n elif name == 'CreatedTime':\n self.created_time = boto.utils.parse_ts(value)\n elif name == 'KernelId':\n self.kernel_id = value\n elif name == 'RamdiskId':\n self.ramdisk_id = value\n elif name == 'UserData':\n try:\n self.user_data = base64.b64decode(value)\n except TypeError:\n self.user_data = value\n elif name == 'LaunchConfigurationARN':\n self.launch_configuration_arn = value\n elif name == 'InstanceMonitoring':\n self.instance_monitoring = value\n elif name == 'SpotPrice':\n self.spot_price = float(value)\n elif name == 'IamInstanceProfile':\n self.instance_profile_name = value\n elif name == 'EbsOptimized':\n self.ebs_optimized = True if value.lower() == 'true' else False\n elif name == 'VolumeType':\n self.volume_type = value\n elif name == 'DeleteOnTermination':\n if value.lower() == 'true':\n self.delete_on_termination = True\n else:\n self.delete_on_termination = False\n elif name == 'Iops':\n self.iops = int(value)\n else:\n setattr(self, name, value)\n\n def delete(self):\n \"\"\" Delete this launch configuration. 
\"\"\"\n return self.connection.delete_launch_configuration(self.name)\n", "path": "boto/ec2/autoscale/launchconfig.py"}], "after_files": [{"content": "# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/\n# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\nfrom datetime import datetime\nfrom boto.resultset import ResultSet\nfrom boto.ec2.elb.listelement import ListElement\nimport boto.utils\nimport base64\n\n# this should use the corresponding object from boto.ec2\n\n\nclass Ebs(object):\n def __init__(self, connection=None, snapshot_id=None, volume_size=None):\n self.connection = connection\n self.snapshot_id = snapshot_id\n self.volume_size = volume_size\n\n def __repr__(self):\n return 'Ebs(%s, %s)' % (self.snapshot_id, self.volume_size)\n\n def startElement(self, name, attrs, connection):\n pass\n\n def endElement(self, name, value, connection):\n if name == 'SnapshotId':\n self.snapshot_id = value\n elif name == 'VolumeSize':\n self.volume_size = value\n\n\nclass InstanceMonitoring(object):\n def __init__(self, connection=None, enabled='false'):\n self.connection = connection\n self.enabled = enabled\n\n def __repr__(self):\n return 'InstanceMonitoring(%s)' % self.enabled\n\n def startElement(self, name, attrs, connection):\n pass\n\n def endElement(self, name, value, connection):\n if name == 'Enabled':\n self.enabled = value\n\n\n# this should use the BlockDeviceMapping from boto.ec2.blockdevicemapping\nclass BlockDeviceMapping(object):\n def __init__(self, connection=None, device_name=None, virtual_name=None,\n ebs=None, no_device=None):\n self.connection = connection\n self.device_name = device_name\n self.virtual_name = virtual_name\n self.ebs = ebs\n self.no_device = no_device\n\n def __repr__(self):\n return 'BlockDeviceMapping(%s, %s)' % (self.device_name,\n self.virtual_name)\n\n def startElement(self, name, attrs, connection):\n if name == 'Ebs':\n self.ebs = Ebs(self)\n return self.ebs\n\n def endElement(self, name, value, connection):\n if name == 'DeviceName':\n self.device_name = value\n elif name == 'VirtualName':\n self.virtual_name = value\n elif name == 'NoDevice':\n self.no_device = bool(value)\n\n\nclass LaunchConfiguration(object):\n def __init__(self, connection=None, name=None, image_id=None,\n key_name=None, security_groups=None, user_data=None,\n instance_type='m1.small', kernel_id=None,\n ramdisk_id=None, block_device_mappings=None,\n instance_monitoring=False, spot_price=None,\n 
instance_profile_name=None, ebs_optimized=False,\n associate_public_ip_address=None, volume_type=None,\n delete_on_termination=True, iops=None):\n \"\"\"\n A launch configuration.\n\n :type name: str\n :param name: Name of the launch configuration to create.\n\n :type image_id: str\n :param image_id: Unique ID of the Amazon Machine Image (AMI) which was\n assigned during registration.\n\n :type key_name: str\n :param key_name: The name of the EC2 key pair.\n\n :type security_groups: list\n :param security_groups: Names or security group id's of the security\n groups with which to associate the EC2 instances or VPC instances,\n respectively.\n\n :type user_data: str\n :param user_data: The user data available to launched EC2 instances.\n\n :type instance_type: str\n :param instance_type: The instance type\n\n :type kern_id: str\n :param kern_id: Kernel id for instance\n\n :type ramdisk_id: str\n :param ramdisk_id: RAM disk id for instance\n\n :type block_device_mappings: list\n :param block_device_mappings: Specifies how block devices are exposed\n for instances\n\n :type instance_monitoring: bool\n :param instance_monitoring: Whether instances in group are launched\n with detailed monitoring.\n\n :type spot_price: float\n :param spot_price: The spot price you are bidding. Only applies\n if you are building an autoscaling group with spot instances.\n\n :type instance_profile_name: string\n :param instance_profile_name: The name or the Amazon Resource\n Name (ARN) of the instance profile associated with the IAM\n role for the instance.\n\n :type ebs_optimized: bool\n :param ebs_optimized: Specifies whether the instance is optimized\n for EBS I/O (true) or not (false).\n\n :type associate_public_ip_address: bool\n :param associate_public_ip_address: Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud.\n Specifies whether to assign a public IP address to each instance launched in a Amazon VPC.\n \"\"\"\n self.connection = connection\n self.name = name\n self.instance_type = instance_type\n self.block_device_mappings = block_device_mappings\n self.key_name = key_name\n sec_groups = security_groups or []\n self.security_groups = ListElement(sec_groups)\n self.image_id = image_id\n self.ramdisk_id = ramdisk_id\n self.created_time = None\n self.kernel_id = kernel_id\n self.user_data = user_data\n self.created_time = None\n self.instance_monitoring = instance_monitoring\n self.spot_price = spot_price\n self.instance_profile_name = instance_profile_name\n self.launch_configuration_arn = None\n self.ebs_optimized = ebs_optimized\n self.associate_public_ip_address = associate_public_ip_address\n self.volume_type = volume_type\n self.delete_on_termination = delete_on_termination\n self.iops = iops\n\n def __repr__(self):\n return 'LaunchConfiguration:%s' % self.name\n\n def startElement(self, name, attrs, connection):\n if name == 'SecurityGroups':\n return self.security_groups\n elif name == 'BlockDeviceMappings':\n self.block_device_mappings = ResultSet([('member',\n BlockDeviceMapping)])\n return self.block_device_mappings\n elif name == 'InstanceMonitoring':\n self.instance_monitoring = InstanceMonitoring(self)\n return self.instance_monitoring\n\n def endElement(self, name, value, connection):\n if name == 'InstanceType':\n self.instance_type = value\n elif name == 'LaunchConfigurationName':\n self.name = value\n elif name == 'KeyName':\n self.key_name = value\n elif name == 'ImageId':\n self.image_id = value\n elif name == 'CreatedTime':\n self.created_time = 
boto.utils.parse_ts(value)\n elif name == 'KernelId':\n self.kernel_id = value\n elif name == 'RamdiskId':\n self.ramdisk_id = value\n elif name == 'UserData':\n try:\n self.user_data = base64.b64decode(value)\n except TypeError:\n self.user_data = value\n elif name == 'LaunchConfigurationARN':\n self.launch_configuration_arn = value\n elif name == 'InstanceMonitoring':\n self.instance_monitoring = value\n elif name == 'SpotPrice':\n self.spot_price = float(value)\n elif name == 'IamInstanceProfile':\n self.instance_profile_name = value\n elif name == 'EbsOptimized':\n self.ebs_optimized = True if value.lower() == 'true' else False\n elif name == 'AssociatePublicIpAddress':\n self.associate_public_ip_address = True if value.lower() == 'true' else False\n elif name == 'VolumeType':\n self.volume_type = value\n elif name == 'DeleteOnTermination':\n if value.lower() == 'true':\n self.delete_on_termination = True\n else:\n self.delete_on_termination = False\n elif name == 'Iops':\n self.iops = int(value)\n else:\n setattr(self, name, value)\n\n def delete(self):\n \"\"\" Delete this launch configuration. \"\"\"\n return self.connection.delete_launch_configuration(self.name)\n", "path": "boto/ec2/autoscale/launchconfig.py"}]}
| 3,224 | 163 |
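The boto record above reduces to converting AWS's `"true"`/`"false"` strings into Python booleans inside the SAX `endElement` handler. A minimal standalone sketch of that idiom (not boto's actual code) looks like this:

```python
def parse_xml_bool(value: str) -> bool:
    # AWS XML responses encode booleans as the strings "true" / "false".
    return value.lower() == "true"

# Mirrors the new AssociatePublicIpAddress branch in the golden diff above.
assert parse_xml_bool("true") is True
assert parse_xml_bool("False") is False
```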
gh_patches_debug_42808
|
rasdani/github-patches
|
git_diff
|
comfyanonymous__ComfyUI-413
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CORS support in backend
I want to test a frontend on a different port than the backend on `localhost`, but get blocked by CORS.
Sadly, however, `aiohttp_cors` has a 5-year-old bug that breaks CORS support for POST requests (while GET works fine): https://github.com/aio-libs/aiohttp-cors/issues/155
I found that I had to use this code to work around it, but I couldn't get around modifying the backend: https://github.com/home-assistant/core/issues/40513#issuecomment-700654471
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `server.py`
Content:
```
1 import os
2 import sys
3 import asyncio
4 import nodes
5 import folder_paths
6 import execution
7 import uuid
8 import json
9 import glob
10 try:
11 import aiohttp
12 from aiohttp import web
13 except ImportError:
14 print("Module 'aiohttp' not installed. Please install it via:")
15 print("pip install aiohttp")
16 print("or")
17 print("pip install -r requirements.txt")
18 sys.exit()
19
20 import mimetypes
21
22
23 @web.middleware
24 async def cache_control(request: web.Request, handler):
25 response: web.Response = await handler(request)
26 if request.path.endswith('.js') or request.path.endswith('.css'):
27 response.headers.setdefault('Cache-Control', 'no-cache')
28 return response
29
30 class PromptServer():
31 def __init__(self, loop):
32 PromptServer.instance = self
33
34 mimetypes.init();
35 mimetypes.types_map['.js'] = 'application/javascript; charset=utf-8'
36 self.prompt_queue = None
37 self.loop = loop
38 self.messages = asyncio.Queue()
39 self.number = 0
40 self.app = web.Application(client_max_size=20971520, middlewares=[cache_control])
41 self.sockets = dict()
42 self.web_root = os.path.join(os.path.dirname(
43 os.path.realpath(__file__)), "web")
44 routes = web.RouteTableDef()
45 self.routes = routes
46 self.last_node_id = None
47 self.client_id = None
48
49 @routes.get('/ws')
50 async def websocket_handler(request):
51 ws = web.WebSocketResponse()
52 await ws.prepare(request)
53 sid = request.rel_url.query.get('clientId', '')
54 if sid:
55 # Reusing existing session, remove old
56 self.sockets.pop(sid, None)
57 else:
58 sid = uuid.uuid4().hex
59
60 self.sockets[sid] = ws
61
62 try:
63 # Send initial state to the new client
64 await self.send("status", { "status": self.get_queue_info(), 'sid': sid }, sid)
65 # On reconnect if we are the currently executing client send the current node
66 if self.client_id == sid and self.last_node_id is not None:
67 await self.send("executing", { "node": self.last_node_id }, sid)
68
69 async for msg in ws:
70 if msg.type == aiohttp.WSMsgType.ERROR:
71 print('ws connection closed with exception %s' % ws.exception())
72 finally:
73 self.sockets.pop(sid, None)
74 return ws
75
76 @routes.get("/")
77 async def get_root(request):
78 return web.FileResponse(os.path.join(self.web_root, "index.html"))
79
80 @routes.get("/embeddings")
81 def get_embeddings(self):
82 embeddings = folder_paths.get_filename_list("embeddings")
83 return web.json_response(list(map(lambda a: os.path.splitext(a)[0].lower(), embeddings)))
84
85 @routes.get("/extensions")
86 async def get_extensions(request):
87 files = glob.glob(os.path.join(self.web_root, 'extensions/**/*.js'), recursive=True)
88 return web.json_response(list(map(lambda f: "/" + os.path.relpath(f, self.web_root).replace("\\", "/"), files)))
89
90 @routes.post("/upload/image")
91 async def upload_image(request):
92 upload_dir = folder_paths.get_input_directory()
93
94 if not os.path.exists(upload_dir):
95 os.makedirs(upload_dir)
96
97 post = await request.post()
98 image = post.get("image")
99
100 if image and image.file:
101 filename = image.filename
102 if not filename:
103 return web.Response(status=400)
104
105 split = os.path.splitext(filename)
106 i = 1
107 while os.path.exists(os.path.join(upload_dir, filename)):
108 filename = f"{split[0]} ({i}){split[1]}"
109 i += 1
110
111 filepath = os.path.join(upload_dir, filename)
112
113 with open(filepath, "wb") as f:
114 f.write(image.file.read())
115
116 return web.json_response({"name" : filename})
117 else:
118 return web.Response(status=400)
119
120
121 @routes.get("/view")
122 async def view_image(request):
123 if "filename" in request.rel_url.query:
124 type = request.rel_url.query.get("type", "output")
125 output_dir = folder_paths.get_directory_by_type(type)
126 if output_dir is None:
127 return web.Response(status=400)
128
129 if "subfolder" in request.rel_url.query:
130 full_output_dir = os.path.join(output_dir, request.rel_url.query["subfolder"])
131 if os.path.commonpath((os.path.abspath(full_output_dir), output_dir)) != output_dir:
132 return web.Response(status=403)
133 output_dir = full_output_dir
134
135 filename = request.rel_url.query["filename"]
136 filename = os.path.basename(filename)
137 file = os.path.join(output_dir, filename)
138
139 if os.path.isfile(file):
140 return web.FileResponse(file, headers={"Content-Disposition": f"filename=\"{filename}\""})
141
142 return web.Response(status=404)
143
144 @routes.get("/prompt")
145 async def get_prompt(request):
146 return web.json_response(self.get_queue_info())
147
148 @routes.get("/object_info")
149 async def get_object_info(request):
150 out = {}
151 for x in nodes.NODE_CLASS_MAPPINGS:
152 obj_class = nodes.NODE_CLASS_MAPPINGS[x]
153 info = {}
154 info['input'] = obj_class.INPUT_TYPES()
155 info['output'] = obj_class.RETURN_TYPES
156 info['output_name'] = obj_class.RETURN_NAMES if hasattr(obj_class, 'RETURN_NAMES') else info['output']
157 info['name'] = x #TODO
158 info['description'] = ''
159 info['category'] = 'sd'
160 if hasattr(obj_class, 'CATEGORY'):
161 info['category'] = obj_class.CATEGORY
162 out[x] = info
163 return web.json_response(out)
164
165 @routes.get("/history")
166 async def get_history(request):
167 return web.json_response(self.prompt_queue.get_history())
168
169 @routes.get("/queue")
170 async def get_queue(request):
171 queue_info = {}
172 current_queue = self.prompt_queue.get_current_queue()
173 queue_info['queue_running'] = current_queue[0]
174 queue_info['queue_pending'] = current_queue[1]
175 return web.json_response(queue_info)
176
177 @routes.post("/prompt")
178 async def post_prompt(request):
179 print("got prompt")
180 resp_code = 200
181 out_string = ""
182 json_data = await request.json()
183
184 if "number" in json_data:
185 number = float(json_data['number'])
186 else:
187 number = self.number
188 if "front" in json_data:
189 if json_data['front']:
190 number = -number
191
192 self.number += 1
193
194 if "prompt" in json_data:
195 prompt = json_data["prompt"]
196 valid = execution.validate_prompt(prompt)
197 extra_data = {}
198 if "extra_data" in json_data:
199 extra_data = json_data["extra_data"]
200
201 if "client_id" in json_data:
202 extra_data["client_id"] = json_data["client_id"]
203 if valid[0]:
204 self.prompt_queue.put((number, id(prompt), prompt, extra_data))
205 else:
206 resp_code = 400
207 out_string = valid[1]
208 print("invalid prompt:", valid[1])
209
210 return web.Response(body=out_string, status=resp_code)
211
212 @routes.post("/queue")
213 async def post_queue(request):
214 json_data = await request.json()
215 if "clear" in json_data:
216 if json_data["clear"]:
217 self.prompt_queue.wipe_queue()
218 if "delete" in json_data:
219 to_delete = json_data['delete']
220 for id_to_delete in to_delete:
221 delete_func = lambda a: a[1] == int(id_to_delete)
222 self.prompt_queue.delete_queue_item(delete_func)
223
224 return web.Response(status=200)
225
226 @routes.post("/interrupt")
227 async def post_interrupt(request):
228 nodes.interrupt_processing()
229 return web.Response(status=200)
230
231 @routes.post("/history")
232 async def post_history(request):
233 json_data = await request.json()
234 if "clear" in json_data:
235 if json_data["clear"]:
236 self.prompt_queue.wipe_history()
237 if "delete" in json_data:
238 to_delete = json_data['delete']
239 for id_to_delete in to_delete:
240 self.prompt_queue.delete_history_item(id_to_delete)
241
242 return web.Response(status=200)
243
244 def add_routes(self):
245 self.app.add_routes(self.routes)
246 self.app.add_routes([
247 web.static('/', self.web_root),
248 ])
249
250 def get_queue_info(self):
251 prompt_info = {}
252 exec_info = {}
253 exec_info['queue_remaining'] = self.prompt_queue.get_tasks_remaining()
254 prompt_info['exec_info'] = exec_info
255 return prompt_info
256
257 async def send(self, event, data, sid=None):
258 message = {"type": event, "data": data}
259
260 if isinstance(message, str) == False:
261 message = json.dumps(message)
262
263 if sid is None:
264 for ws in self.sockets.values():
265 await ws.send_str(message)
266 elif sid in self.sockets:
267 await self.sockets[sid].send_str(message)
268
269 def send_sync(self, event, data, sid=None):
270 self.loop.call_soon_threadsafe(
271 self.messages.put_nowait, (event, data, sid))
272
273 def queue_updated(self):
274 self.send_sync("status", { "status": self.get_queue_info() })
275
276 async def publish_loop(self):
277 while True:
278 msg = await self.messages.get()
279 await self.send(*msg)
280
281 async def start(self, address, port, verbose=True, call_on_start=None):
282 runner = web.AppRunner(self.app)
283 await runner.setup()
284 site = web.TCPSite(runner, address, port)
285 await site.start()
286
287 if address == '':
288 address = '0.0.0.0'
289 if verbose:
290 print("Starting server\n")
291 print("To see the GUI go to: http://{}:{}".format(address, port))
292 if call_on_start is not None:
293 call_on_start(address, port)
294
295
```
Path: `comfy/cli_args.py`
Content:
```
1 import argparse
2
3 parser = argparse.ArgumentParser()
4
5 parser.add_argument("--listen", nargs="?", const="0.0.0.0", default="127.0.0.1", type=str, help="Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. (listens on all)")
6 parser.add_argument("--port", type=int, default=8188, help="Set the listen port.")
7 parser.add_argument("--extra-model-paths-config", type=str, default=None, help="Load an extra_model_paths.yaml file.")
8 parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.")
9 parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.")
10
11 attn_group = parser.add_mutually_exclusive_group()
12 attn_group.add_argument("--use-split-cross-attention", action="store_true", help="Use the split cross attention optimization instead of the sub-quadratic one. Ignored when xformers is used.")
13 attn_group.add_argument("--use-pytorch-cross-attention", action="store_true", help="Use the new pytorch 2.0 cross attention function.")
14
15 parser.add_argument("--disable-xformers", action="store_true", help="Disable xformers.")
16 parser.add_argument("--cuda-device", type=int, default=None, help="Set the id of the cuda device this instance will use.")
17
18 vram_group = parser.add_mutually_exclusive_group()
19 vram_group.add_argument("--highvram", action="store_true", help="By default models will be unloaded to CPU memory after being used. This option keeps them in GPU memory.")
20 vram_group.add_argument("--normalvram", action="store_true", help="Used to force normal vram use if lowvram gets automatically enabled.")
21 vram_group.add_argument("--lowvram", action="store_true", help="Split the unet in parts to use less vram.")
22 vram_group.add_argument("--novram", action="store_true", help="When lowvram isn't enough.")
23 vram_group.add_argument("--cpu", action="store_true", help="To use the CPU for everything (slow).")
24
25 parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.")
26 parser.add_argument("--quick-test-for-ci", action="store_true", help="Quick test for CI.")
27 parser.add_argument("--windows-standalone-build", action="store_true", help="Windows standalone build.")
28
29 args = parser.parse_args()
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/comfy/cli_args.py b/comfy/cli_args.py
--- a/comfy/cli_args.py
+++ b/comfy/cli_args.py
@@ -4,8 +4,10 @@
parser.add_argument("--listen", nargs="?", const="0.0.0.0", default="127.0.0.1", type=str, help="Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. (listens on all)")
parser.add_argument("--port", type=int, default=8188, help="Set the listen port.")
+parser.add_argument("--cors", default=None, nargs="?", const="*", help="Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.")
parser.add_argument("--extra-model-paths-config", type=str, default=None, help="Load an extra_model_paths.yaml file.")
parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.")
+parser.add_argument("--cuda-device", type=int, default=None, help="Set the id of the cuda device this instance will use.")
parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.")
attn_group = parser.add_mutually_exclusive_group()
@@ -13,7 +15,6 @@
attn_group.add_argument("--use-pytorch-cross-attention", action="store_true", help="Use the new pytorch 2.0 cross attention function.")
parser.add_argument("--disable-xformers", action="store_true", help="Disable xformers.")
-parser.add_argument("--cuda-device", type=int, default=None, help="Set the id of the cuda device this instance will use.")
vram_group = parser.add_mutually_exclusive_group()
vram_group.add_argument("--highvram", action="store_true", help="By default models will be unloaded to CPU memory after being used. This option keeps them in GPU memory.")
diff --git a/server.py b/server.py
--- a/server.py
+++ b/server.py
@@ -18,6 +18,7 @@
sys.exit()
import mimetypes
+from comfy.cli_args import args
@web.middleware
@@ -27,6 +28,23 @@
response.headers.setdefault('Cache-Control', 'no-cache')
return response
+def create_cors_middleware(allowed_origin: str):
+ @web.middleware
+ async def cors_middleware(request: web.Request, handler):
+ if request.method == "OPTIONS":
+ # Pre-flight request. Reply successfully:
+ response = web.Response()
+ else:
+ response = await handler(request)
+
+ response.headers['Access-Control-Allow-Origin'] = allowed_origin
+ response.headers['Access-Control-Allow-Methods'] = 'POST, GET, DELETE, PUT, OPTIONS'
+ response.headers['Access-Control-Allow-Headers'] = 'Content-Type, Authorization'
+ response.headers['Access-Control-Allow-Credentials'] = 'true'
+ return response
+
+ return cors_middleware
+
class PromptServer():
def __init__(self, loop):
PromptServer.instance = self
@@ -37,7 +55,12 @@
self.loop = loop
self.messages = asyncio.Queue()
self.number = 0
- self.app = web.Application(client_max_size=20971520, middlewares=[cache_control])
+
+ middlewares = [cache_control]
+ if args.cors:
+ middlewares.append(create_cors_middleware(args.cors))
+
+ self.app = web.Application(client_max_size=20971520, middlewares=middlewares)
self.sockets = dict()
self.web_root = os.path.join(os.path.dirname(
os.path.realpath(__file__)), "web")
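A quick way to confirm the new behaviour is to send a pre-flight `OPTIONS` request from Python's standard library once the patched server is running with the `--cors` flag. This is only an illustrative sketch: the `main.py` entry-point name, the port 8188 (the default from `cli_args.py`), and the `http://localhost:5173` frontend origin are assumptions for the example, not part of the patch.

```python
# Sketch: verify that the CORS middleware answers pre-flight requests.
# Assumes the server was started with something like:
#   python main.py --cors http://localhost:5173
import urllib.request

req = urllib.request.Request(
    "http://127.0.0.1:8188/prompt",
    method="OPTIONS",
    headers={
        "Origin": "http://localhost:5173",
        "Access-Control-Request-Method": "POST",
    },
)
with urllib.request.urlopen(req) as resp:
    print(resp.status)                                      # expected: 200
    print(resp.headers.get("Access-Control-Allow-Origin"))  # expected: the configured origin
    print(resp.headers.get("Access-Control-Allow-Methods"))
```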
|
{"golden_diff": "diff --git a/comfy/cli_args.py b/comfy/cli_args.py\n--- a/comfy/cli_args.py\n+++ b/comfy/cli_args.py\n@@ -4,8 +4,10 @@\n \n parser.add_argument(\"--listen\", nargs=\"?\", const=\"0.0.0.0\", default=\"127.0.0.1\", type=str, help=\"Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. (listens on all)\")\n parser.add_argument(\"--port\", type=int, default=8188, help=\"Set the listen port.\")\n+parser.add_argument(\"--cors\", default=None, nargs=\"?\", const=\"*\", help=\"Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.\")\n parser.add_argument(\"--extra-model-paths-config\", type=str, default=None, help=\"Load an extra_model_paths.yaml file.\")\n parser.add_argument(\"--output-directory\", type=str, default=None, help=\"Set the ComfyUI output directory.\")\n+parser.add_argument(\"--cuda-device\", type=int, default=None, help=\"Set the id of the cuda device this instance will use.\")\n parser.add_argument(\"--dont-upcast-attention\", action=\"store_true\", help=\"Disable upcasting of attention. Can boost speed but increase the chances of black images.\")\n \n attn_group = parser.add_mutually_exclusive_group()\n@@ -13,7 +15,6 @@\n attn_group.add_argument(\"--use-pytorch-cross-attention\", action=\"store_true\", help=\"Use the new pytorch 2.0 cross attention function.\")\n \n parser.add_argument(\"--disable-xformers\", action=\"store_true\", help=\"Disable xformers.\")\n-parser.add_argument(\"--cuda-device\", type=int, default=None, help=\"Set the id of the cuda device this instance will use.\")\n \n vram_group = parser.add_mutually_exclusive_group()\n vram_group.add_argument(\"--highvram\", action=\"store_true\", help=\"By default models will be unloaded to CPU memory after being used. This option keeps them in GPU memory.\")\ndiff --git a/server.py b/server.py\n--- a/server.py\n+++ b/server.py\n@@ -18,6 +18,7 @@\n sys.exit()\n \n import mimetypes\n+from comfy.cli_args import args\n \n \n @web.middleware\n@@ -27,6 +28,23 @@\n response.headers.setdefault('Cache-Control', 'no-cache')\n return response\n \n+def create_cors_middleware(allowed_origin: str):\n+ @web.middleware\n+ async def cors_middleware(request: web.Request, handler):\n+ if request.method == \"OPTIONS\":\n+ # Pre-flight request. 
Reply successfully:\n+ response = web.Response()\n+ else:\n+ response = await handler(request)\n+\n+ response.headers['Access-Control-Allow-Origin'] = allowed_origin\n+ response.headers['Access-Control-Allow-Methods'] = 'POST, GET, DELETE, PUT, OPTIONS'\n+ response.headers['Access-Control-Allow-Headers'] = 'Content-Type, Authorization'\n+ response.headers['Access-Control-Allow-Credentials'] = 'true'\n+ return response\n+\n+ return cors_middleware\n+\n class PromptServer():\n def __init__(self, loop):\n PromptServer.instance = self\n@@ -37,7 +55,12 @@\n self.loop = loop\n self.messages = asyncio.Queue()\n self.number = 0\n- self.app = web.Application(client_max_size=20971520, middlewares=[cache_control])\n+\n+ middlewares = [cache_control]\n+ if args.cors:\n+ middlewares.append(create_cors_middleware(args.cors))\n+\n+ self.app = web.Application(client_max_size=20971520, middlewares=middlewares)\n self.sockets = dict()\n self.web_root = os.path.join(os.path.dirname(\n os.path.realpath(__file__)), \"web\")\n", "issue": "CORS support in backend\nI want to test a frontend on a different port than the backend on `localhost`, but get blocked by CORS.\r\n\r\nSadly however `aiohttp_cors` has a 5-year-old bug that breaks CORS support for POST requests (while GET works fine): https://github.com/aio-libs/aiohttp-cors/issues/155\r\n\r\nI found that I had to use this code to work around it, but I couldn't get around modifying the backend https://github.com/home-assistant/core/issues/40513#issuecomment-700654471\n", "before_files": [{"content": "import os\nimport sys\nimport asyncio\nimport nodes\nimport folder_paths\nimport execution\nimport uuid\nimport json\nimport glob\ntry:\n import aiohttp\n from aiohttp import web\nexcept ImportError:\n print(\"Module 'aiohttp' not installed. 
Please install it via:\")\n print(\"pip install aiohttp\")\n print(\"or\")\n print(\"pip install -r requirements.txt\")\n sys.exit()\n\nimport mimetypes\n\n\[email protected]\nasync def cache_control(request: web.Request, handler):\n response: web.Response = await handler(request)\n if request.path.endswith('.js') or request.path.endswith('.css'):\n response.headers.setdefault('Cache-Control', 'no-cache')\n return response\n\nclass PromptServer():\n def __init__(self, loop):\n PromptServer.instance = self\n\n mimetypes.init(); \n mimetypes.types_map['.js'] = 'application/javascript; charset=utf-8'\n self.prompt_queue = None\n self.loop = loop\n self.messages = asyncio.Queue()\n self.number = 0\n self.app = web.Application(client_max_size=20971520, middlewares=[cache_control])\n self.sockets = dict()\n self.web_root = os.path.join(os.path.dirname(\n os.path.realpath(__file__)), \"web\")\n routes = web.RouteTableDef()\n self.routes = routes\n self.last_node_id = None\n self.client_id = None\n\n @routes.get('/ws')\n async def websocket_handler(request):\n ws = web.WebSocketResponse()\n await ws.prepare(request)\n sid = request.rel_url.query.get('clientId', '')\n if sid:\n # Reusing existing session, remove old\n self.sockets.pop(sid, None)\n else:\n sid = uuid.uuid4().hex \n\n self.sockets[sid] = ws\n\n try:\n # Send initial state to the new client\n await self.send(\"status\", { \"status\": self.get_queue_info(), 'sid': sid }, sid)\n # On reconnect if we are the currently executing client send the current node\n if self.client_id == sid and self.last_node_id is not None:\n await self.send(\"executing\", { \"node\": self.last_node_id }, sid)\n \n async for msg in ws:\n if msg.type == aiohttp.WSMsgType.ERROR:\n print('ws connection closed with exception %s' % ws.exception())\n finally:\n self.sockets.pop(sid, None)\n return ws\n\n @routes.get(\"/\")\n async def get_root(request):\n return web.FileResponse(os.path.join(self.web_root, \"index.html\"))\n\n @routes.get(\"/embeddings\")\n def get_embeddings(self):\n embeddings = folder_paths.get_filename_list(\"embeddings\")\n return web.json_response(list(map(lambda a: os.path.splitext(a)[0].lower(), embeddings)))\n\n @routes.get(\"/extensions\")\n async def get_extensions(request):\n files = glob.glob(os.path.join(self.web_root, 'extensions/**/*.js'), recursive=True)\n return web.json_response(list(map(lambda f: \"/\" + os.path.relpath(f, self.web_root).replace(\"\\\\\", \"/\"), files)))\n\n @routes.post(\"/upload/image\")\n async def upload_image(request):\n upload_dir = folder_paths.get_input_directory()\n\n if not os.path.exists(upload_dir):\n os.makedirs(upload_dir)\n \n post = await request.post()\n image = post.get(\"image\")\n\n if image and image.file:\n filename = image.filename\n if not filename:\n return web.Response(status=400)\n\n split = os.path.splitext(filename)\n i = 1\n while os.path.exists(os.path.join(upload_dir, filename)):\n filename = f\"{split[0]} ({i}){split[1]}\"\n i += 1\n\n filepath = os.path.join(upload_dir, filename)\n\n with open(filepath, \"wb\") as f:\n f.write(image.file.read())\n \n return web.json_response({\"name\" : filename})\n else:\n return web.Response(status=400)\n\n\n @routes.get(\"/view\")\n async def view_image(request):\n if \"filename\" in request.rel_url.query:\n type = request.rel_url.query.get(\"type\", \"output\")\n output_dir = folder_paths.get_directory_by_type(type)\n if output_dir is None:\n return web.Response(status=400)\n\n if \"subfolder\" in request.rel_url.query:\n full_output_dir = 
os.path.join(output_dir, request.rel_url.query[\"subfolder\"])\n if os.path.commonpath((os.path.abspath(full_output_dir), output_dir)) != output_dir:\n return web.Response(status=403)\n output_dir = full_output_dir\n\n filename = request.rel_url.query[\"filename\"]\n filename = os.path.basename(filename)\n file = os.path.join(output_dir, filename)\n\n if os.path.isfile(file):\n return web.FileResponse(file, headers={\"Content-Disposition\": f\"filename=\\\"{filename}\\\"\"})\n \n return web.Response(status=404)\n\n @routes.get(\"/prompt\")\n async def get_prompt(request):\n return web.json_response(self.get_queue_info())\n\n @routes.get(\"/object_info\")\n async def get_object_info(request):\n out = {}\n for x in nodes.NODE_CLASS_MAPPINGS:\n obj_class = nodes.NODE_CLASS_MAPPINGS[x]\n info = {}\n info['input'] = obj_class.INPUT_TYPES()\n info['output'] = obj_class.RETURN_TYPES\n info['output_name'] = obj_class.RETURN_NAMES if hasattr(obj_class, 'RETURN_NAMES') else info['output']\n info['name'] = x #TODO\n info['description'] = ''\n info['category'] = 'sd'\n if hasattr(obj_class, 'CATEGORY'):\n info['category'] = obj_class.CATEGORY\n out[x] = info\n return web.json_response(out)\n\n @routes.get(\"/history\")\n async def get_history(request):\n return web.json_response(self.prompt_queue.get_history())\n\n @routes.get(\"/queue\")\n async def get_queue(request):\n queue_info = {}\n current_queue = self.prompt_queue.get_current_queue()\n queue_info['queue_running'] = current_queue[0]\n queue_info['queue_pending'] = current_queue[1]\n return web.json_response(queue_info)\n\n @routes.post(\"/prompt\")\n async def post_prompt(request):\n print(\"got prompt\")\n resp_code = 200\n out_string = \"\"\n json_data = await request.json()\n\n if \"number\" in json_data:\n number = float(json_data['number'])\n else:\n number = self.number\n if \"front\" in json_data:\n if json_data['front']:\n number = -number\n\n self.number += 1\n\n if \"prompt\" in json_data:\n prompt = json_data[\"prompt\"]\n valid = execution.validate_prompt(prompt)\n extra_data = {}\n if \"extra_data\" in json_data:\n extra_data = json_data[\"extra_data\"]\n\n if \"client_id\" in json_data:\n extra_data[\"client_id\"] = json_data[\"client_id\"]\n if valid[0]:\n self.prompt_queue.put((number, id(prompt), prompt, extra_data))\n else:\n resp_code = 400\n out_string = valid[1]\n print(\"invalid prompt:\", valid[1])\n\n return web.Response(body=out_string, status=resp_code)\n \n @routes.post(\"/queue\")\n async def post_queue(request):\n json_data = await request.json()\n if \"clear\" in json_data:\n if json_data[\"clear\"]:\n self.prompt_queue.wipe_queue()\n if \"delete\" in json_data:\n to_delete = json_data['delete']\n for id_to_delete in to_delete:\n delete_func = lambda a: a[1] == int(id_to_delete)\n self.prompt_queue.delete_queue_item(delete_func)\n \n return web.Response(status=200)\n\n @routes.post(\"/interrupt\")\n async def post_interrupt(request):\n nodes.interrupt_processing()\n return web.Response(status=200)\n\n @routes.post(\"/history\")\n async def post_history(request):\n json_data = await request.json()\n if \"clear\" in json_data:\n if json_data[\"clear\"]:\n self.prompt_queue.wipe_history()\n if \"delete\" in json_data:\n to_delete = json_data['delete']\n for id_to_delete in to_delete:\n self.prompt_queue.delete_history_item(id_to_delete)\n\n return web.Response(status=200)\n \n def add_routes(self):\n self.app.add_routes(self.routes)\n self.app.add_routes([\n web.static('/', self.web_root),\n ])\n\n def 
get_queue_info(self):\n prompt_info = {}\n exec_info = {}\n exec_info['queue_remaining'] = self.prompt_queue.get_tasks_remaining()\n prompt_info['exec_info'] = exec_info\n return prompt_info\n\n async def send(self, event, data, sid=None):\n message = {\"type\": event, \"data\": data}\n \n if isinstance(message, str) == False:\n message = json.dumps(message)\n\n if sid is None:\n for ws in self.sockets.values():\n await ws.send_str(message)\n elif sid in self.sockets:\n await self.sockets[sid].send_str(message)\n\n def send_sync(self, event, data, sid=None):\n self.loop.call_soon_threadsafe(\n self.messages.put_nowait, (event, data, sid))\n\n def queue_updated(self):\n self.send_sync(\"status\", { \"status\": self.get_queue_info() })\n\n async def publish_loop(self):\n while True:\n msg = await self.messages.get()\n await self.send(*msg)\n\n async def start(self, address, port, verbose=True, call_on_start=None):\n runner = web.AppRunner(self.app)\n await runner.setup()\n site = web.TCPSite(runner, address, port)\n await site.start()\n\n if address == '':\n address = '0.0.0.0'\n if verbose:\n print(\"Starting server\\n\")\n print(\"To see the GUI go to: http://{}:{}\".format(address, port))\n if call_on_start is not None:\n call_on_start(address, port)\n\n", "path": "server.py"}, {"content": "import argparse\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"--listen\", nargs=\"?\", const=\"0.0.0.0\", default=\"127.0.0.1\", type=str, help=\"Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. (listens on all)\")\nparser.add_argument(\"--port\", type=int, default=8188, help=\"Set the listen port.\")\nparser.add_argument(\"--extra-model-paths-config\", type=str, default=None, help=\"Load an extra_model_paths.yaml file.\")\nparser.add_argument(\"--output-directory\", type=str, default=None, help=\"Set the ComfyUI output directory.\")\nparser.add_argument(\"--dont-upcast-attention\", action=\"store_true\", help=\"Disable upcasting of attention. Can boost speed but increase the chances of black images.\")\n\nattn_group = parser.add_mutually_exclusive_group()\nattn_group.add_argument(\"--use-split-cross-attention\", action=\"store_true\", help=\"Use the split cross attention optimization instead of the sub-quadratic one. Ignored when xformers is used.\")\nattn_group.add_argument(\"--use-pytorch-cross-attention\", action=\"store_true\", help=\"Use the new pytorch 2.0 cross attention function.\")\n\nparser.add_argument(\"--disable-xformers\", action=\"store_true\", help=\"Disable xformers.\")\nparser.add_argument(\"--cuda-device\", type=int, default=None, help=\"Set the id of the cuda device this instance will use.\")\n\nvram_group = parser.add_mutually_exclusive_group()\nvram_group.add_argument(\"--highvram\", action=\"store_true\", help=\"By default models will be unloaded to CPU memory after being used. 
This option keeps them in GPU memory.\")\nvram_group.add_argument(\"--normalvram\", action=\"store_true\", help=\"Used to force normal vram use if lowvram gets automatically enabled.\")\nvram_group.add_argument(\"--lowvram\", action=\"store_true\", help=\"Split the unet in parts to use less vram.\")\nvram_group.add_argument(\"--novram\", action=\"store_true\", help=\"When lowvram isn't enough.\")\nvram_group.add_argument(\"--cpu\", action=\"store_true\", help=\"To use the CPU for everything (slow).\")\n\nparser.add_argument(\"--dont-print-server\", action=\"store_true\", help=\"Don't print server output.\")\nparser.add_argument(\"--quick-test-for-ci\", action=\"store_true\", help=\"Quick test for CI.\")\nparser.add_argument(\"--windows-standalone-build\", action=\"store_true\", help=\"Windows standalone build.\")\n\nargs = parser.parse_args()\n", "path": "comfy/cli_args.py"}], "after_files": [{"content": "import os\nimport sys\nimport asyncio\nimport nodes\nimport folder_paths\nimport execution\nimport uuid\nimport json\nimport glob\ntry:\n import aiohttp\n from aiohttp import web\nexcept ImportError:\n print(\"Module 'aiohttp' not installed. Please install it via:\")\n print(\"pip install aiohttp\")\n print(\"or\")\n print(\"pip install -r requirements.txt\")\n sys.exit()\n\nimport mimetypes\nfrom comfy.cli_args import args\n\n\[email protected]\nasync def cache_control(request: web.Request, handler):\n response: web.Response = await handler(request)\n if request.path.endswith('.js') or request.path.endswith('.css'):\n response.headers.setdefault('Cache-Control', 'no-cache')\n return response\n\ndef create_cors_middleware(allowed_origin: str):\n @web.middleware\n async def cors_middleware(request: web.Request, handler):\n if request.method == \"OPTIONS\":\n # Pre-flight request. 
Reply successfully:\n response = web.Response()\n else:\n response = await handler(request)\n\n response.headers['Access-Control-Allow-Origin'] = allowed_origin\n response.headers['Access-Control-Allow-Methods'] = 'POST, GET, DELETE, PUT, OPTIONS'\n response.headers['Access-Control-Allow-Headers'] = 'Content-Type, Authorization'\n response.headers['Access-Control-Allow-Credentials'] = 'true'\n return response\n\n return cors_middleware\n\nclass PromptServer():\n def __init__(self, loop):\n PromptServer.instance = self\n\n mimetypes.init(); \n mimetypes.types_map['.js'] = 'application/javascript; charset=utf-8'\n self.prompt_queue = None\n self.loop = loop\n self.messages = asyncio.Queue()\n self.number = 0\n\n middlewares = [cache_control]\n if args.cors:\n middlewares.append(create_cors_middleware(args.cors))\n\n self.app = web.Application(client_max_size=20971520, middlewares=middlewares)\n self.sockets = dict()\n self.web_root = os.path.join(os.path.dirname(\n os.path.realpath(__file__)), \"web\")\n routes = web.RouteTableDef()\n self.routes = routes\n self.last_node_id = None\n self.client_id = None\n\n @routes.get('/ws')\n async def websocket_handler(request):\n ws = web.WebSocketResponse()\n await ws.prepare(request)\n sid = request.rel_url.query.get('clientId', '')\n if sid:\n # Reusing existing session, remove old\n self.sockets.pop(sid, None)\n else:\n sid = uuid.uuid4().hex \n\n self.sockets[sid] = ws\n\n try:\n # Send initial state to the new client\n await self.send(\"status\", { \"status\": self.get_queue_info(), 'sid': sid }, sid)\n # On reconnect if we are the currently executing client send the current node\n if self.client_id == sid and self.last_node_id is not None:\n await self.send(\"executing\", { \"node\": self.last_node_id }, sid)\n \n async for msg in ws:\n if msg.type == aiohttp.WSMsgType.ERROR:\n print('ws connection closed with exception %s' % ws.exception())\n finally:\n self.sockets.pop(sid, None)\n return ws\n\n @routes.get(\"/\")\n async def get_root(request):\n return web.FileResponse(os.path.join(self.web_root, \"index.html\"))\n\n @routes.get(\"/embeddings\")\n def get_embeddings(self):\n embeddings = folder_paths.get_filename_list(\"embeddings\")\n return web.json_response(list(map(lambda a: os.path.splitext(a)[0].lower(), embeddings)))\n\n @routes.get(\"/extensions\")\n async def get_extensions(request):\n files = glob.glob(os.path.join(self.web_root, 'extensions/**/*.js'), recursive=True)\n return web.json_response(list(map(lambda f: \"/\" + os.path.relpath(f, self.web_root).replace(\"\\\\\", \"/\"), files)))\n\n @routes.post(\"/upload/image\")\n async def upload_image(request):\n upload_dir = folder_paths.get_input_directory()\n\n if not os.path.exists(upload_dir):\n os.makedirs(upload_dir)\n \n post = await request.post()\n image = post.get(\"image\")\n\n if image and image.file:\n filename = image.filename\n if not filename:\n return web.Response(status=400)\n\n split = os.path.splitext(filename)\n i = 1\n while os.path.exists(os.path.join(upload_dir, filename)):\n filename = f\"{split[0]} ({i}){split[1]}\"\n i += 1\n\n filepath = os.path.join(upload_dir, filename)\n\n with open(filepath, \"wb\") as f:\n f.write(image.file.read())\n \n return web.json_response({\"name\" : filename})\n else:\n return web.Response(status=400)\n\n\n @routes.get(\"/view\")\n async def view_image(request):\n if \"filename\" in request.rel_url.query:\n type = request.rel_url.query.get(\"type\", \"output\")\n output_dir = folder_paths.get_directory_by_type(type)\n if 
output_dir is None:\n return web.Response(status=400)\n\n if \"subfolder\" in request.rel_url.query:\n full_output_dir = os.path.join(output_dir, request.rel_url.query[\"subfolder\"])\n if os.path.commonpath((os.path.abspath(full_output_dir), output_dir)) != output_dir:\n return web.Response(status=403)\n output_dir = full_output_dir\n\n filename = request.rel_url.query[\"filename\"]\n filename = os.path.basename(filename)\n file = os.path.join(output_dir, filename)\n\n if os.path.isfile(file):\n return web.FileResponse(file, headers={\"Content-Disposition\": f\"filename=\\\"{filename}\\\"\"})\n \n return web.Response(status=404)\n\n @routes.get(\"/prompt\")\n async def get_prompt(request):\n return web.json_response(self.get_queue_info())\n\n @routes.get(\"/object_info\")\n async def get_object_info(request):\n out = {}\n for x in nodes.NODE_CLASS_MAPPINGS:\n obj_class = nodes.NODE_CLASS_MAPPINGS[x]\n info = {}\n info['input'] = obj_class.INPUT_TYPES()\n info['output'] = obj_class.RETURN_TYPES\n info['output_name'] = obj_class.RETURN_NAMES if hasattr(obj_class, 'RETURN_NAMES') else info['output']\n info['name'] = x #TODO\n info['description'] = ''\n info['category'] = 'sd'\n if hasattr(obj_class, 'CATEGORY'):\n info['category'] = obj_class.CATEGORY\n out[x] = info\n return web.json_response(out)\n\n @routes.get(\"/history\")\n async def get_history(request):\n return web.json_response(self.prompt_queue.get_history())\n\n @routes.get(\"/queue\")\n async def get_queue(request):\n queue_info = {}\n current_queue = self.prompt_queue.get_current_queue()\n queue_info['queue_running'] = current_queue[0]\n queue_info['queue_pending'] = current_queue[1]\n return web.json_response(queue_info)\n\n @routes.post(\"/prompt\")\n async def post_prompt(request):\n print(\"got prompt\")\n resp_code = 200\n out_string = \"\"\n json_data = await request.json()\n\n if \"number\" in json_data:\n number = float(json_data['number'])\n else:\n number = self.number\n if \"front\" in json_data:\n if json_data['front']:\n number = -number\n\n self.number += 1\n\n if \"prompt\" in json_data:\n prompt = json_data[\"prompt\"]\n valid = execution.validate_prompt(prompt)\n extra_data = {}\n if \"extra_data\" in json_data:\n extra_data = json_data[\"extra_data\"]\n\n if \"client_id\" in json_data:\n extra_data[\"client_id\"] = json_data[\"client_id\"]\n if valid[0]:\n self.prompt_queue.put((number, id(prompt), prompt, extra_data))\n else:\n resp_code = 400\n out_string = valid[1]\n print(\"invalid prompt:\", valid[1])\n\n return web.Response(body=out_string, status=resp_code)\n \n @routes.post(\"/queue\")\n async def post_queue(request):\n json_data = await request.json()\n if \"clear\" in json_data:\n if json_data[\"clear\"]:\n self.prompt_queue.wipe_queue()\n if \"delete\" in json_data:\n to_delete = json_data['delete']\n for id_to_delete in to_delete:\n delete_func = lambda a: a[1] == int(id_to_delete)\n self.prompt_queue.delete_queue_item(delete_func)\n \n return web.Response(status=200)\n\n @routes.post(\"/interrupt\")\n async def post_interrupt(request):\n nodes.interrupt_processing()\n return web.Response(status=200)\n\n @routes.post(\"/history\")\n async def post_history(request):\n json_data = await request.json()\n if \"clear\" in json_data:\n if json_data[\"clear\"]:\n self.prompt_queue.wipe_history()\n if \"delete\" in json_data:\n to_delete = json_data['delete']\n for id_to_delete in to_delete:\n self.prompt_queue.delete_history_item(id_to_delete)\n\n return web.Response(status=200)\n \n def 
add_routes(self):\n self.app.add_routes(self.routes)\n self.app.add_routes([\n web.static('/', self.web_root),\n ])\n\n def get_queue_info(self):\n prompt_info = {}\n exec_info = {}\n exec_info['queue_remaining'] = self.prompt_queue.get_tasks_remaining()\n prompt_info['exec_info'] = exec_info\n return prompt_info\n\n async def send(self, event, data, sid=None):\n message = {\"type\": event, \"data\": data}\n \n if isinstance(message, str) == False:\n message = json.dumps(message)\n\n if sid is None:\n for ws in self.sockets.values():\n await ws.send_str(message)\n elif sid in self.sockets:\n await self.sockets[sid].send_str(message)\n\n def send_sync(self, event, data, sid=None):\n self.loop.call_soon_threadsafe(\n self.messages.put_nowait, (event, data, sid))\n\n def queue_updated(self):\n self.send_sync(\"status\", { \"status\": self.get_queue_info() })\n\n async def publish_loop(self):\n while True:\n msg = await self.messages.get()\n await self.send(*msg)\n\n async def start(self, address, port, verbose=True, call_on_start=None):\n runner = web.AppRunner(self.app)\n await runner.setup()\n site = web.TCPSite(runner, address, port)\n await site.start()\n\n if address == '':\n address = '0.0.0.0'\n if verbose:\n print(\"Starting server\\n\")\n print(\"To see the GUI go to: http://{}:{}\".format(address, port))\n if call_on_start is not None:\n call_on_start(address, port)\n\n", "path": "server.py"}, {"content": "import argparse\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"--listen\", nargs=\"?\", const=\"0.0.0.0\", default=\"127.0.0.1\", type=str, help=\"Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. (listens on all)\")\nparser.add_argument(\"--port\", type=int, default=8188, help=\"Set the listen port.\")\nparser.add_argument(\"--cors\", default=None, nargs=\"?\", const=\"*\", help=\"Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.\")\nparser.add_argument(\"--extra-model-paths-config\", type=str, default=None, help=\"Load an extra_model_paths.yaml file.\")\nparser.add_argument(\"--output-directory\", type=str, default=None, help=\"Set the ComfyUI output directory.\")\nparser.add_argument(\"--cuda-device\", type=int, default=None, help=\"Set the id of the cuda device this instance will use.\")\nparser.add_argument(\"--dont-upcast-attention\", action=\"store_true\", help=\"Disable upcasting of attention. Can boost speed but increase the chances of black images.\")\n\nattn_group = parser.add_mutually_exclusive_group()\nattn_group.add_argument(\"--use-split-cross-attention\", action=\"store_true\", help=\"Use the split cross attention optimization instead of the sub-quadratic one. Ignored when xformers is used.\")\nattn_group.add_argument(\"--use-pytorch-cross-attention\", action=\"store_true\", help=\"Use the new pytorch 2.0 cross attention function.\")\n\nparser.add_argument(\"--disable-xformers\", action=\"store_true\", help=\"Disable xformers.\")\n\nvram_group = parser.add_mutually_exclusive_group()\nvram_group.add_argument(\"--highvram\", action=\"store_true\", help=\"By default models will be unloaded to CPU memory after being used. 
This option keeps them in GPU memory.\")\nvram_group.add_argument(\"--normalvram\", action=\"store_true\", help=\"Used to force normal vram use if lowvram gets automatically enabled.\")\nvram_group.add_argument(\"--lowvram\", action=\"store_true\", help=\"Split the unet in parts to use less vram.\")\nvram_group.add_argument(\"--novram\", action=\"store_true\", help=\"When lowvram isn't enough.\")\nvram_group.add_argument(\"--cpu\", action=\"store_true\", help=\"To use the CPU for everything (slow).\")\n\nparser.add_argument(\"--dont-print-server\", action=\"store_true\", help=\"Don't print server output.\")\nparser.add_argument(\"--quick-test-for-ci\", action=\"store_true\", help=\"Quick test for CI.\")\nparser.add_argument(\"--windows-standalone-build\", action=\"store_true\", help=\"Windows standalone build.\")\n\nargs = parser.parse_args()\n", "path": "comfy/cli_args.py"}]}
| 4,012 | 859 |
gh_patches_debug_15618
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-930
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add time to crontab log message
When the cron parser is triggered, it emits a debug log saying `Running crontab skills`.
It would be more useful if it included the time that opsdroid thinks it is. This would help when trying to debug issues where skills are triggered at the wrong time due to opsdroid having the wrong timezone.
The line which needs updating is [here](https://github.com/opsdroid/opsdroid/blob/master/opsdroid/parsers/crontab.py#L17).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/parsers/crontab.py`
Content:
```
1 """A helper function for parsing and executing crontab skills."""
2
3 import asyncio
4 import logging
5
6 import arrow
7 import pycron
8
9
10 _LOGGER = logging.getLogger(__name__)
11
12
13 async def parse_crontab(opsdroid):
14 """Parse all crontab skills against the current time."""
15 while opsdroid.eventloop.is_running():
16 await asyncio.sleep(60 - arrow.now().time().second)
17 _LOGGER.debug(_("Running crontab skills"))
18 for skill in opsdroid.skills:
19 for matcher in skill.matchers:
20 if "crontab" in matcher:
21 if matcher["timezone"] is not None:
22 timezone = matcher["timezone"]
23 else:
24 timezone = opsdroid.config.get("timezone", "UTC")
25 if pycron.is_now(matcher["crontab"],
26 arrow.now(tz=timezone)):
27 await opsdroid.run_skill(skill,
28 skill.config,
29 None)
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opsdroid/parsers/crontab.py b/opsdroid/parsers/crontab.py
--- a/opsdroid/parsers/crontab.py
+++ b/opsdroid/parsers/crontab.py
@@ -1,5 +1,5 @@
"""A helper function for parsing and executing crontab skills."""
-
+import time
import asyncio
import logging
@@ -14,7 +14,7 @@
"""Parse all crontab skills against the current time."""
while opsdroid.eventloop.is_running():
await asyncio.sleep(60 - arrow.now().time().second)
- _LOGGER.debug(_("Running crontab skills"))
+ _LOGGER.debug(_("Running crontab skills at %s "), time.asctime())
for skill in opsdroid.skills:
for matcher in skill.matchers:
if "crontab" in matcher:
|
{"golden_diff": "diff --git a/opsdroid/parsers/crontab.py b/opsdroid/parsers/crontab.py\n--- a/opsdroid/parsers/crontab.py\n+++ b/opsdroid/parsers/crontab.py\n@@ -1,5 +1,5 @@\n \"\"\"A helper function for parsing and executing crontab skills.\"\"\"\n-\n+import time\n import asyncio\n import logging\n \n@@ -14,7 +14,7 @@\n \"\"\"Parse all crontab skills against the current time.\"\"\"\n while opsdroid.eventloop.is_running():\n await asyncio.sleep(60 - arrow.now().time().second)\n- _LOGGER.debug(_(\"Running crontab skills\"))\n+ _LOGGER.debug(_(\"Running crontab skills at %s \"), time.asctime())\n for skill in opsdroid.skills:\n for matcher in skill.matchers:\n if \"crontab\" in matcher:\n", "issue": "Add time to crontab log message\nWhen the cron parser is triggered it emits a debug log saying `Running crontab skills`.\r\n\r\nIt would be more useful if it included the time that opsdroid thinks it is. This would help when trying to debug issues where skills are triggered at the wrong time due to opsdroid having the wrong timezone.\r\n\r\nThe line which needs updating is [here](https://github.com/opsdroid/opsdroid/blob/master/opsdroid/parsers/crontab.py#L17). \n", "before_files": [{"content": "\"\"\"A helper function for parsing and executing crontab skills.\"\"\"\n\nimport asyncio\nimport logging\n\nimport arrow\nimport pycron\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def parse_crontab(opsdroid):\n \"\"\"Parse all crontab skills against the current time.\"\"\"\n while opsdroid.eventloop.is_running():\n await asyncio.sleep(60 - arrow.now().time().second)\n _LOGGER.debug(_(\"Running crontab skills\"))\n for skill in opsdroid.skills:\n for matcher in skill.matchers:\n if \"crontab\" in matcher:\n if matcher[\"timezone\"] is not None:\n timezone = matcher[\"timezone\"]\n else:\n timezone = opsdroid.config.get(\"timezone\", \"UTC\")\n if pycron.is_now(matcher[\"crontab\"],\n arrow.now(tz=timezone)):\n await opsdroid.run_skill(skill,\n skill.config,\n None)\n", "path": "opsdroid/parsers/crontab.py"}], "after_files": [{"content": "\"\"\"A helper function for parsing and executing crontab skills.\"\"\"\nimport time\nimport asyncio\nimport logging\n\nimport arrow\nimport pycron\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def parse_crontab(opsdroid):\n \"\"\"Parse all crontab skills against the current time.\"\"\"\n while opsdroid.eventloop.is_running():\n await asyncio.sleep(60 - arrow.now().time().second)\n _LOGGER.debug(_(\"Running crontab skills at %s \"), time.asctime())\n for skill in opsdroid.skills:\n for matcher in skill.matchers:\n if \"crontab\" in matcher:\n if matcher[\"timezone\"] is not None:\n timezone = matcher[\"timezone\"]\n else:\n timezone = opsdroid.config.get(\"timezone\", \"UTC\")\n if pycron.is_now(matcher[\"crontab\"],\n arrow.now(tz=timezone)):\n await opsdroid.run_skill(skill,\n skill.config,\n None)\n", "path": "opsdroid/parsers/crontab.py"}]}
| 626 | 194 |
gh_patches_debug_8828
|
rasdani/github-patches
|
git_diff
|
mozmeao__snippets-service-1238
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Just published jobs with global limits get prematurely completed due to missing metrics.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snippets/base/management/commands/update_jobs.py`
Content:
```
1 from datetime import datetime, timedelta
2
3 from django.contrib.auth import get_user_model
4 from django.core.management.base import BaseCommand
5 from django.db import transaction
6 from django.db.models import F, Q
7
8 from snippets.base.models import Job
9
10
11 class Command(BaseCommand):
12 args = "(no args)"
13 help = "Update Jobs"
14
15 @transaction.atomic
16 def handle(self, *args, **options):
17 now = datetime.utcnow()
18 user = get_user_model().objects.get_or_create(username='snippets_bot')[0]
19 count_total_completed = 0
20
21 # Publish Scheduled Jobs with `publish_start` before now or without
22 # publish_start.
23 jobs = Job.objects.filter(status=Job.SCHEDULED).filter(
24 Q(publish_start__lte=now) | Q(publish_start=None)
25 )
26 count_published = jobs.count()
27 for job in jobs:
28 job.change_status(
29 status=Job.PUBLISHED,
30 user=user,
31 reason='Published start date reached.',
32 )
33
34 # Disable Published Jobs with `publish_end` before now.
35 jobs = Job.objects.filter(status=Job.PUBLISHED, publish_end__lte=now)
36 count_publication_end = jobs.count()
37 count_total_completed += count_publication_end
38
39 for job in jobs:
40 job.change_status(
41 status=Job.COMPLETED,
42 user=user,
43 reason='Publication end date reached.',
44 )
45
46 # Disable Jobs that reached Impression, Click or Block limits.
47 count_limit = {}
48 for limit in ['impressions', 'clicks', 'blocks']:
49 jobs = (Job.objects
50 .filter(status=Job.PUBLISHED)
51 .exclude(**{f'limit_{limit}': 0})
52 .filter(**{f'limit_{limit}__lte': F(f'metric_{limit}')}))
53 for job in jobs:
54 job.change_status(
55 status=Job.COMPLETED,
56 user=user,
57 reason=f'Limit reached: {limit}.',
58 )
59
60 count_limit[limit] = jobs.count()
61 count_total_completed += count_limit[limit]
62
63 # Disable Jobs that have Impression, Click or Block limits but don't
64 # have metrics data for at least 24h. This is to handle cases where the
65 # Metrics Pipeline is broken.
66 yesterday = datetime.utcnow() - timedelta(days=1)
67 jobs = (Job.objects
68 .filter(status=Job.PUBLISHED)
69 .exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0)
70 .filter(metric_last_update__lt=yesterday))
71 for job in jobs:
72 job.change_status(
73 status=Job.COMPLETED,
74 user=user,
75 reason=f'Premature termination due to missing metrics.',
76 )
77 count_premature_termination = jobs.count()
78 count_total_completed += count_premature_termination
79
80 count_running = Job.objects.filter(status=Job.PUBLISHED).count()
81
82 self.stdout.write(
83 f'Jobs Published: {count_published}\n'
84 f'Jobs Completed: {count_total_completed}\n'
85 f' - Reached Publication End Date: {count_publication_end}\n'
86 f' - Reached Impressions Limit: {count_limit["impressions"]}\n'
87 f' - Reached Clicks Limit: {count_limit["clicks"]}\n'
88 f' - Reached Blocks Limit: {count_limit["blocks"]}\n'
89 f' - Premature Termination due to missing metrics: {count_premature_termination}\n'
90 f'Total Jobs Running: {count_running}\n'
91 )
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/snippets/base/management/commands/update_jobs.py b/snippets/base/management/commands/update_jobs.py
--- a/snippets/base/management/commands/update_jobs.py
+++ b/snippets/base/management/commands/update_jobs.py
@@ -67,6 +67,8 @@
jobs = (Job.objects
.filter(status=Job.PUBLISHED)
.exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0)
+ # Exclude Jobs with limits which haven't been updated once yet.
+ .exclude(metric_last_update='1970-01-01')
.filter(metric_last_update__lt=yesterday))
for job in jobs:
job.change_status(
|
{"golden_diff": "diff --git a/snippets/base/management/commands/update_jobs.py b/snippets/base/management/commands/update_jobs.py\n--- a/snippets/base/management/commands/update_jobs.py\n+++ b/snippets/base/management/commands/update_jobs.py\n@@ -67,6 +67,8 @@\n jobs = (Job.objects\n .filter(status=Job.PUBLISHED)\n .exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0)\n+ # Exclude Jobs with limits which haven't been updated once yet.\n+ .exclude(metric_last_update='1970-01-01')\n .filter(metric_last_update__lt=yesterday))\n for job in jobs:\n job.change_status(\n", "issue": "Just published jobs with global limits get prematurely completed due to missing metrics.\n\n", "before_files": [{"content": "from datetime import datetime, timedelta\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\nfrom django.db.models import F, Q\n\nfrom snippets.base.models import Job\n\n\nclass Command(BaseCommand):\n args = \"(no args)\"\n help = \"Update Jobs\"\n\n @transaction.atomic\n def handle(self, *args, **options):\n now = datetime.utcnow()\n user = get_user_model().objects.get_or_create(username='snippets_bot')[0]\n count_total_completed = 0\n\n # Publish Scheduled Jobs with `publish_start` before now or without\n # publish_start.\n jobs = Job.objects.filter(status=Job.SCHEDULED).filter(\n Q(publish_start__lte=now) | Q(publish_start=None)\n )\n count_published = jobs.count()\n for job in jobs:\n job.change_status(\n status=Job.PUBLISHED,\n user=user,\n reason='Published start date reached.',\n )\n\n # Disable Published Jobs with `publish_end` before now.\n jobs = Job.objects.filter(status=Job.PUBLISHED, publish_end__lte=now)\n count_publication_end = jobs.count()\n count_total_completed += count_publication_end\n\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason='Publication end date reached.',\n )\n\n # Disable Jobs that reached Impression, Click or Block limits.\n count_limit = {}\n for limit in ['impressions', 'clicks', 'blocks']:\n jobs = (Job.objects\n .filter(status=Job.PUBLISHED)\n .exclude(**{f'limit_{limit}': 0})\n .filter(**{f'limit_{limit}__lte': F(f'metric_{limit}')}))\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason=f'Limit reached: {limit}.',\n )\n\n count_limit[limit] = jobs.count()\n count_total_completed += count_limit[limit]\n\n # Disable Jobs that have Impression, Click or Block limits but don't\n # have metrics data for at least 24h. 
This is to handle cases where the\n # Metrics Pipeline is broken.\n yesterday = datetime.utcnow() - timedelta(days=1)\n jobs = (Job.objects\n .filter(status=Job.PUBLISHED)\n .exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0)\n .filter(metric_last_update__lt=yesterday))\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason=f'Premature termination due to missing metrics.',\n )\n count_premature_termination = jobs.count()\n count_total_completed += count_premature_termination\n\n count_running = Job.objects.filter(status=Job.PUBLISHED).count()\n\n self.stdout.write(\n f'Jobs Published: {count_published}\\n'\n f'Jobs Completed: {count_total_completed}\\n'\n f' - Reached Publication End Date: {count_publication_end}\\n'\n f' - Reached Impressions Limit: {count_limit[\"impressions\"]}\\n'\n f' - Reached Clicks Limit: {count_limit[\"clicks\"]}\\n'\n f' - Reached Blocks Limit: {count_limit[\"blocks\"]}\\n'\n f' - Premature Termination due to missing metrics: {count_premature_termination}\\n'\n f'Total Jobs Running: {count_running}\\n'\n )\n", "path": "snippets/base/management/commands/update_jobs.py"}], "after_files": [{"content": "from datetime import datetime, timedelta\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\nfrom django.db.models import F, Q\n\nfrom snippets.base.models import Job\n\n\nclass Command(BaseCommand):\n args = \"(no args)\"\n help = \"Update Jobs\"\n\n @transaction.atomic\n def handle(self, *args, **options):\n now = datetime.utcnow()\n user = get_user_model().objects.get_or_create(username='snippets_bot')[0]\n count_total_completed = 0\n\n # Publish Scheduled Jobs with `publish_start` before now or without\n # publish_start.\n jobs = Job.objects.filter(status=Job.SCHEDULED).filter(\n Q(publish_start__lte=now) | Q(publish_start=None)\n )\n count_published = jobs.count()\n for job in jobs:\n job.change_status(\n status=Job.PUBLISHED,\n user=user,\n reason='Published start date reached.',\n )\n\n # Disable Published Jobs with `publish_end` before now.\n jobs = Job.objects.filter(status=Job.PUBLISHED, publish_end__lte=now)\n count_publication_end = jobs.count()\n count_total_completed += count_publication_end\n\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason='Publication end date reached.',\n )\n\n # Disable Jobs that reached Impression, Click or Block limits.\n count_limit = {}\n for limit in ['impressions', 'clicks', 'blocks']:\n jobs = (Job.objects\n .filter(status=Job.PUBLISHED)\n .exclude(**{f'limit_{limit}': 0})\n .filter(**{f'limit_{limit}__lte': F(f'metric_{limit}')}))\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason=f'Limit reached: {limit}.',\n )\n\n count_limit[limit] = jobs.count()\n count_total_completed += count_limit[limit]\n\n # Disable Jobs that have Impression, Click or Block limits but don't\n # have metrics data for at least 24h. 
This is to handle cases where the\n # Metrics Pipeline is broken.\n yesterday = datetime.utcnow() - timedelta(days=1)\n jobs = (Job.objects\n .filter(status=Job.PUBLISHED)\n .exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0)\n # Exclude Jobs with limits which haven't been updated once yet.\n .exclude(metric_last_update='1970-01-01')\n .filter(metric_last_update__lt=yesterday))\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason=f'Premature termination due to missing metrics.',\n )\n count_premature_termination = jobs.count()\n count_total_completed += count_premature_termination\n\n count_running = Job.objects.filter(status=Job.PUBLISHED).count()\n\n self.stdout.write(\n f'Jobs Published: {count_published}\\n'\n f'Jobs Completed: {count_total_completed}\\n'\n f' - Reached Publication End Date: {count_publication_end}\\n'\n f' - Reached Impressions Limit: {count_limit[\"impressions\"]}\\n'\n f' - Reached Clicks Limit: {count_limit[\"clicks\"]}\\n'\n f' - Reached Blocks Limit: {count_limit[\"blocks\"]}\\n'\n f' - Premature Termination due to missing metrics: {count_premature_termination}\\n'\n f'Total Jobs Running: {count_running}\\n'\n )\n", "path": "snippets/base/management/commands/update_jobs.py"}]}
| 1,228 | 157 |
gh_patches_debug_6786
|
rasdani/github-patches
|
git_diff
|
google__jax-6232
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
linear_transpose involving fft seemingly incorrect
I was planning to implement functionality that requires me to be able to take the transpose of a complicated linear function, which includes Fourier transforms. I noticed that the fft module seems to implement the rules for transposition; the last few lines of https://jax.readthedocs.io/en/latest/_modules/jax/_src/lax/fft.html seem to pertain to it.
I am not familiar enough with the jax API at present to point out any bugs; but below is a simple test that demonstrates what I believe to be a bug. The build_matrix helper function explicitly constructs the coefficients of the linear function by feeding it all delta functions in sequence. If I linear_transpose my function, that should be identical to the matrix transpose of the built matrix. Yet it doesn't. It seems as if I'm getting my result in reverse order (plus another off-by-one index bug, I think).
Seems to me like a bug in the implementation of the transpose rules for ffts; but again, I am not qualified to spot it myself.
While on the topic, slightly related question: when viewed as linear operators, convolution and correlation are transposed operators. Should I trust jax to figure out efficient transformations along these lines (assuming the underlying fft rules are bug free); or is it likely optimal for me to figure out how to override the linear transpose of a convolution with my own handcrafted correlation functions (and vice versa)?
Code to reproduce:
```python
import numpy as np
import jax
from jax import numpy as jnp
import matplotlib.pyplot as plt
np.random.seed(0)
signal = np.cumsum(np.random.randn(2**8))
signal_jax = jnp.array(signal)
x = np.linspace(-1, 1, len(signal))
psf = np.clip(0.2 - np.abs(x), 0, 1) * (x > 0)
psf /= psf.sum()
psf_jax = jnp.array(psf)
jrfft = jax.jit(jnp.fft.rfft)
jirfft = jax.jit(jnp.fft.irfft)
@jax.jit
def convolve(a, b):
    fa = jrfft(a)
    fb = jrfft(b)
    return jirfft(fa * fb)

@jax.jit
def correlate(a, b):
    """NOTE: can this be implemented as a transposition rule according to:
    https://jax.readthedocs.io/en/latest/_modules/jax/_src/lax/fft.html
    """
    fa = jrfft(a).conj()
    fb = jrfft(b)
    return jirfft(fa * fb)

def psf_convolve(psf):
    """statically bind psf arg"""
    psf = jax.numpy.fft.ifftshift(psf)
    return lambda a: convolve(psf, a)

def psf_correlate(psf):
    """statically bind psf arg. psf assumed to be centered"""
    psf = jax.numpy.fft.ifftshift(psf)
    return lambda a: correlate(psf, a)

import types

def build_matrix(func, shape):
    """explicitly evaluate coefficient matrix of linear operator func by calling it repeatedly with delta functions"""
    i, j = shape
    Z = []
    I = jnp.eye(i, j)
    for r in range(i):
        z = func(I[r])
        Z.append(z)
    return jnp.array(Z)

func = psf_convolve(psf_jax)

arr = types.SimpleNamespace(shape=signal_jax.shape, dtype=np.float32)
func_trans = lambda a: jax.linear_transpose(func, arr)(a)[0]

N = len(signal)

plt.figure()
M = build_matrix(func, (N, N)).T
plt.imshow(M)

plt.figure()
M = build_matrix(func_trans, (N, N))
plt.imshow(M)
plt.show()
```
- [ ] If applicable, include full error messages/tracebacks.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jax/_src/lax/fft.py`
Content:
```
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 from functools import partial
17
18 import numpy as np
19
20 from jax.api import jit, linear_transpose, ShapeDtypeStruct
21 from jax.core import Primitive
22 from jax.interpreters import xla
23 from jax._src.util import prod
24 from jax import dtypes, lax
25 from jax.lib import xla_client
26 from jax.interpreters import ad
27 from jax.interpreters import batching
28 from jax.lib import pocketfft
29
30 xops = xla_client.ops
31
32 __all__ = [
33 "fft",
34 "fft_p",
35 ]
36
37 def _promote_to_complex(arg):
38 dtype = dtypes.result_type(arg, np.complex64)
39 return lax.convert_element_type(arg, dtype)
40
41 def _promote_to_real(arg):
42 dtype = dtypes.result_type(arg, np.float32)
43 return lax.convert_element_type(arg, dtype)
44
45 def fft(x, fft_type, fft_lengths):
46 if fft_type == xla_client.FftType.RFFT:
47 if np.iscomplexobj(x):
48 raise ValueError("only real valued inputs supported for rfft")
49 x = _promote_to_real(x)
50 else:
51 x = _promote_to_complex(x)
52 if len(fft_lengths) == 0:
53 # XLA FFT doesn't support 0-rank.
54 return x
55 fft_lengths = tuple(fft_lengths)
56 return fft_p.bind(x, fft_type=fft_type, fft_lengths=fft_lengths)
57
58 def fft_impl(x, fft_type, fft_lengths):
59 return xla.apply_primitive(fft_p, x, fft_type=fft_type, fft_lengths=fft_lengths)
60
61 _complex_dtype = lambda dtype: (np.zeros((), dtype) + np.zeros((), np.complex64)).dtype
62 _real_dtype = lambda dtype: np.zeros((), dtype).real.dtype
63 _is_even = lambda x: x % 2 == 0
64
65 def fft_abstract_eval(x, fft_type, fft_lengths):
66 if fft_type == xla_client.FftType.RFFT:
67 shape = (x.shape[:-len(fft_lengths)] + fft_lengths[:-1]
68 + (fft_lengths[-1] // 2 + 1,))
69 dtype = _complex_dtype(x.dtype)
70 elif fft_type == xla_client.FftType.IRFFT:
71 shape = x.shape[:-len(fft_lengths)] + fft_lengths
72 dtype = _real_dtype(x.dtype)
73 else:
74 shape = x.shape
75 dtype = x.dtype
76 return x.update(shape=shape, dtype=dtype)
77
78 def fft_translation_rule(c, x, fft_type, fft_lengths):
79 return xops.Fft(x, fft_type, fft_lengths)
80
81 def _naive_rfft(x, fft_lengths):
82 y = fft(x, xla_client.FftType.FFT, fft_lengths)
83 n = fft_lengths[-1]
84 return y[..., : n//2 + 1]
85
86 @partial(jit, static_argnums=1)
87 def _rfft_transpose(t, fft_lengths):
88 # The transpose of RFFT can't be expressed only in terms of irfft. Instead of
89 # manually building up larger twiddle matrices (which would increase the
90 # asymptotic complexity and is also rather complicated), we rely JAX to
91 # transpose a naive RFFT implementation.
92 dummy_shape = t.shape[:-len(fft_lengths)] + fft_lengths
93 dummy_primal = ShapeDtypeStruct(dummy_shape, _real_dtype(t.dtype))
94 transpose = linear_transpose(
95 partial(_naive_rfft, fft_lengths=fft_lengths), dummy_primal)
96 result, = transpose(t)
97 assert result.dtype == _real_dtype(t.dtype), (result.dtype, t.dtype)
98 return result
99
100 def _irfft_transpose(t, fft_lengths):
101 # The transpose of IRFFT is the RFFT of the cotangent times a scaling
102 # factor and a mask. The mask scales the cotangent for the Hermitian
103 # symmetric components of the RFFT by a factor of two, since these components
104 # are de-duplicated in the RFFT.
105 x = fft(t, xla_client.FftType.RFFT, fft_lengths)
106 n = x.shape[-1]
107 is_odd = fft_lengths[-1] % 2
108 full = partial(lax.full_like, t, dtype=t.dtype)
109 mask = lax.concatenate(
110 [full(1.0, shape=(1,)),
111 full(2.0, shape=(n - 2 + is_odd,)),
112 full(1.0, shape=(1 - is_odd,))],
113 dimension=0)
114 scale = 1 / prod(fft_lengths)
115 out = scale * mask * x
116 assert out.dtype == _complex_dtype(t.dtype), (out.dtype, t.dtype)
117 return out
118
119 def fft_transpose_rule(t, operand, fft_type, fft_lengths):
120 if fft_type == xla_client.FftType.RFFT:
121 result = _rfft_transpose(t, fft_lengths)
122 elif fft_type == xla_client.FftType.IRFFT:
123 result = _irfft_transpose(t, fft_lengths)
124 else:
125 result = fft(t, fft_type, fft_lengths)
126 return result,
127
128 def fft_batching_rule(batched_args, batch_dims, fft_type, fft_lengths):
129 x, = batched_args
130 bd, = batch_dims
131 x = batching.moveaxis(x, bd, 0)
132 return fft(x, fft_type, fft_lengths), 0
133
134 fft_p = Primitive('fft')
135 fft_p.def_impl(fft_impl)
136 fft_p.def_abstract_eval(fft_abstract_eval)
137 xla.translations[fft_p] = fft_translation_rule
138 ad.deflinear2(fft_p, fft_transpose_rule)
139 batching.primitive_batchers[fft_p] = fft_batching_rule
140 if pocketfft:
141 xla.backend_specific_translations['cpu'][fft_p] = pocketfft.pocketfft
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/jax/_src/lax/fft.py b/jax/_src/lax/fft.py
--- a/jax/_src/lax/fft.py
+++ b/jax/_src/lax/fft.py
@@ -114,7 +114,9 @@
scale = 1 / prod(fft_lengths)
out = scale * mask * x
assert out.dtype == _complex_dtype(t.dtype), (out.dtype, t.dtype)
- return out
+ # Use JAX's convention for complex gradients
+ # https://github.com/google/jax/issues/6223#issuecomment-807740707
+ return lax.conj(out)
def fft_transpose_rule(t, operand, fft_type, fft_lengths):
if fft_type == xla_client.FftType.RFFT:
|
{"golden_diff": "diff --git a/jax/_src/lax/fft.py b/jax/_src/lax/fft.py\n--- a/jax/_src/lax/fft.py\n+++ b/jax/_src/lax/fft.py\n@@ -114,7 +114,9 @@\n scale = 1 / prod(fft_lengths)\n out = scale * mask * x\n assert out.dtype == _complex_dtype(t.dtype), (out.dtype, t.dtype)\n- return out\n+ # Use JAX's convention for complex gradients\n+ # https://github.com/google/jax/issues/6223#issuecomment-807740707\n+ return lax.conj(out)\n \n def fft_transpose_rule(t, operand, fft_type, fft_lengths):\n if fft_type == xla_client.FftType.RFFT:\n", "issue": "linear_transpose involving fft seemingly incorrect\nI was planning to implement functionality that requires me to be able to take the transpose of a complicated linear function, which includes fourier transforms. I noticed that the fft module seems to implement the rules for transposition; the last few lines of https://jax.readthedocs.io/en/latest/_modules/jax/_src/lax/fft.html seem to pertain to it.\r\n\r\nI am not familiar enough with the jax API at present to point out any bugs; but below is a simple test that demonstrates what I believe to be a bug. The build_matrix helper function explicitly constructs the coefficients of the linear function by feeding it all delta functions in sequence. If I linear_transpose my function, that should be identical to the matrix transpose of the built matrix. Yet it doesnt. It seems as if im getting my result in reverse order (plus another off-by-one index bug I think).\r\n\r\nSeems to me like a bug in the implementation of the transpose rules for ffts; but again not qualified myself to spot it.\r\n\r\nWhile on the topic, slightly related question: when viewed as linear operators, convolution and correlation are transposed operators. Should I trust jax to figure out efficient transformations along these lines (assuming the underlying fft rules are bug free); or is it likely optimal for me to figure out how to override the linear transpose of a convolution with my own handcrafted correlation functions (and vice versa)?\r\n\r\n\r\nCode to reproduce:\r\n\r\n```python\r\nimport numpy as np\r\n\r\nimport jax\r\nfrom jax import numpy as jnp\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nnp.random.seed(0)\r\n\r\nsignal = np.cumsum(np.random.randn(2**8))\r\nsignal_jax = jnp.array(signal)\r\n\r\nx = np.linspace(-1, 1, len(signal))\r\npsf = np.clip(0.2 - np.abs(x), 0, 1) * (x > 0)\r\npsf /= psf.sum()\r\npsf_jax = jnp.array(psf)\r\n\r\njrfft = jax.jit(jnp.fft.rfft)\r\njirfft = jax.jit(jnp.fft.irfft)\r\n\r\n\r\[email protected]\r\ndef convolve(a, b):\r\n\tfa = jrfft(a)\r\n\tfb = jrfft(b)\r\n\treturn jirfft(fa * fb)\r\n\r\n\r\[email protected]\r\ndef correlate(a, b):\r\n\t\"\"\"NOTE: can this be implemented as a transposition rule according to:\r\n\thttps://jax.readthedocs.io/en/latest/_modules/jax/_src/lax/fft.html\r\n\t\"\"\"\r\n\tfa = jrfft(a).conj()\r\n\tfb = jrfft(b)\r\n\treturn jirfft(fa * fb)\r\n\r\n\r\ndef psf_convolve(psf):\r\n\t\"\"\"statically bind psf arg\"\"\"\r\n\tpsf = jax.numpy.fft.ifftshift(psf)\r\n\treturn lambda a: convolve(psf, a)\r\n\r\n\r\ndef psf_correlate(psf):\r\n\t\"\"\"statically bind psf arg. 
psf assumed to be centered\"\"\"\r\n\tpsf = jax.numpy.fft.ifftshift(psf)\r\n\treturn lambda a: correlate(psf, a)\r\n\r\n\r\nimport types\r\n\r\ndef build_matrix(func, shape):\r\n\t\"\"\"explicitly evaluate coeeficient matrix of linear operator func by calling it repeatedly with delta functions\"\"\"\r\n\ti, j = shape\r\n\tZ = []\r\n\tI = jnp.eye(i, j)\r\n\tfor r in range(i):\r\n\t\tz = func(I[r])\r\n\t\tZ.append(z)\r\n\treturn jnp.array(Z)\r\n\r\n\r\nfunc = psf_convolve(psf_jax)\r\n\r\narr = types.SimpleNamespace(shape=signal_jax.shape, dtype=np.float32)\r\nfunc_trans = lambda a: jax.linear_transpose(func, arr)(a)[0]\r\n\r\nN = len(signal)\r\n\r\nplt.figure()\r\nM = build_matrix(func, (N, N)).T\r\nplt.imshow(M)\r\n\r\nplt.figure()\r\nM = build_matrix(func_trans, (N, N))\r\nplt.imshow(M)\r\nplt.show()\r\n```\r\n\r\n- [ ] If applicable, include full error messages/tracebacks.\r\n\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom functools import partial\n\nimport numpy as np\n\nfrom jax.api import jit, linear_transpose, ShapeDtypeStruct\nfrom jax.core import Primitive\nfrom jax.interpreters import xla\nfrom jax._src.util import prod\nfrom jax import dtypes, lax\nfrom jax.lib import xla_client\nfrom jax.interpreters import ad\nfrom jax.interpreters import batching\nfrom jax.lib import pocketfft\n\nxops = xla_client.ops\n\n__all__ = [\n \"fft\",\n \"fft_p\",\n]\n\ndef _promote_to_complex(arg):\n dtype = dtypes.result_type(arg, np.complex64)\n return lax.convert_element_type(arg, dtype)\n\ndef _promote_to_real(arg):\n dtype = dtypes.result_type(arg, np.float32)\n return lax.convert_element_type(arg, dtype)\n\ndef fft(x, fft_type, fft_lengths):\n if fft_type == xla_client.FftType.RFFT:\n if np.iscomplexobj(x):\n raise ValueError(\"only real valued inputs supported for rfft\")\n x = _promote_to_real(x)\n else:\n x = _promote_to_complex(x)\n if len(fft_lengths) == 0:\n # XLA FFT doesn't support 0-rank.\n return x\n fft_lengths = tuple(fft_lengths)\n return fft_p.bind(x, fft_type=fft_type, fft_lengths=fft_lengths)\n\ndef fft_impl(x, fft_type, fft_lengths):\n return xla.apply_primitive(fft_p, x, fft_type=fft_type, fft_lengths=fft_lengths)\n\n_complex_dtype = lambda dtype: (np.zeros((), dtype) + np.zeros((), np.complex64)).dtype\n_real_dtype = lambda dtype: np.zeros((), dtype).real.dtype\n_is_even = lambda x: x % 2 == 0\n\ndef fft_abstract_eval(x, fft_type, fft_lengths):\n if fft_type == xla_client.FftType.RFFT:\n shape = (x.shape[:-len(fft_lengths)] + fft_lengths[:-1]\n + (fft_lengths[-1] // 2 + 1,))\n dtype = _complex_dtype(x.dtype)\n elif fft_type == xla_client.FftType.IRFFT:\n shape = x.shape[:-len(fft_lengths)] + fft_lengths\n dtype = _real_dtype(x.dtype)\n else:\n shape = x.shape\n dtype = x.dtype\n return x.update(shape=shape, dtype=dtype)\n\ndef fft_translation_rule(c, x, fft_type, fft_lengths):\n return xops.Fft(x, fft_type, fft_lengths)\n\ndef _naive_rfft(x, fft_lengths):\n y = fft(x, xla_client.FftType.FFT, 
fft_lengths)\n n = fft_lengths[-1]\n return y[..., : n//2 + 1]\n\n@partial(jit, static_argnums=1)\ndef _rfft_transpose(t, fft_lengths):\n # The transpose of RFFT can't be expressed only in terms of irfft. Instead of\n # manually building up larger twiddle matrices (which would increase the\n # asymptotic complexity and is also rather complicated), we rely JAX to\n # transpose a naive RFFT implementation.\n dummy_shape = t.shape[:-len(fft_lengths)] + fft_lengths\n dummy_primal = ShapeDtypeStruct(dummy_shape, _real_dtype(t.dtype))\n transpose = linear_transpose(\n partial(_naive_rfft, fft_lengths=fft_lengths), dummy_primal)\n result, = transpose(t)\n assert result.dtype == _real_dtype(t.dtype), (result.dtype, t.dtype)\n return result\n\ndef _irfft_transpose(t, fft_lengths):\n # The transpose of IRFFT is the RFFT of the cotangent times a scaling\n # factor and a mask. The mask scales the cotangent for the Hermitian\n # symmetric components of the RFFT by a factor of two, since these components\n # are de-duplicated in the RFFT.\n x = fft(t, xla_client.FftType.RFFT, fft_lengths)\n n = x.shape[-1]\n is_odd = fft_lengths[-1] % 2\n full = partial(lax.full_like, t, dtype=t.dtype)\n mask = lax.concatenate(\n [full(1.0, shape=(1,)),\n full(2.0, shape=(n - 2 + is_odd,)),\n full(1.0, shape=(1 - is_odd,))],\n dimension=0)\n scale = 1 / prod(fft_lengths)\n out = scale * mask * x\n assert out.dtype == _complex_dtype(t.dtype), (out.dtype, t.dtype)\n return out\n\ndef fft_transpose_rule(t, operand, fft_type, fft_lengths):\n if fft_type == xla_client.FftType.RFFT:\n result = _rfft_transpose(t, fft_lengths)\n elif fft_type == xla_client.FftType.IRFFT:\n result = _irfft_transpose(t, fft_lengths)\n else:\n result = fft(t, fft_type, fft_lengths)\n return result,\n\ndef fft_batching_rule(batched_args, batch_dims, fft_type, fft_lengths):\n x, = batched_args\n bd, = batch_dims\n x = batching.moveaxis(x, bd, 0)\n return fft(x, fft_type, fft_lengths), 0\n\nfft_p = Primitive('fft')\nfft_p.def_impl(fft_impl)\nfft_p.def_abstract_eval(fft_abstract_eval)\nxla.translations[fft_p] = fft_translation_rule\nad.deflinear2(fft_p, fft_transpose_rule)\nbatching.primitive_batchers[fft_p] = fft_batching_rule\nif pocketfft:\n xla.backend_specific_translations['cpu'][fft_p] = pocketfft.pocketfft\n", "path": "jax/_src/lax/fft.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom functools import partial\n\nimport numpy as np\n\nfrom jax.api import jit, linear_transpose, ShapeDtypeStruct\nfrom jax.core import Primitive\nfrom jax.interpreters import xla\nfrom jax._src.util import prod\nfrom jax import dtypes, lax\nfrom jax.lib import xla_client\nfrom jax.interpreters import ad\nfrom jax.interpreters import batching\nfrom jax.lib import pocketfft\n\nxops = xla_client.ops\n\n__all__ = [\n \"fft\",\n \"fft_p\",\n]\n\ndef _promote_to_complex(arg):\n dtype = dtypes.result_type(arg, np.complex64)\n return lax.convert_element_type(arg, dtype)\n\ndef _promote_to_real(arg):\n 
dtype = dtypes.result_type(arg, np.float32)\n return lax.convert_element_type(arg, dtype)\n\ndef fft(x, fft_type, fft_lengths):\n if fft_type == xla_client.FftType.RFFT:\n if np.iscomplexobj(x):\n raise ValueError(\"only real valued inputs supported for rfft\")\n x = _promote_to_real(x)\n else:\n x = _promote_to_complex(x)\n if len(fft_lengths) == 0:\n # XLA FFT doesn't support 0-rank.\n return x\n fft_lengths = tuple(fft_lengths)\n return fft_p.bind(x, fft_type=fft_type, fft_lengths=fft_lengths)\n\ndef fft_impl(x, fft_type, fft_lengths):\n return xla.apply_primitive(fft_p, x, fft_type=fft_type, fft_lengths=fft_lengths)\n\n_complex_dtype = lambda dtype: (np.zeros((), dtype) + np.zeros((), np.complex64)).dtype\n_real_dtype = lambda dtype: np.zeros((), dtype).real.dtype\n_is_even = lambda x: x % 2 == 0\n\ndef fft_abstract_eval(x, fft_type, fft_lengths):\n if fft_type == xla_client.FftType.RFFT:\n shape = (x.shape[:-len(fft_lengths)] + fft_lengths[:-1]\n + (fft_lengths[-1] // 2 + 1,))\n dtype = _complex_dtype(x.dtype)\n elif fft_type == xla_client.FftType.IRFFT:\n shape = x.shape[:-len(fft_lengths)] + fft_lengths\n dtype = _real_dtype(x.dtype)\n else:\n shape = x.shape\n dtype = x.dtype\n return x.update(shape=shape, dtype=dtype)\n\ndef fft_translation_rule(c, x, fft_type, fft_lengths):\n return xops.Fft(x, fft_type, fft_lengths)\n\ndef _naive_rfft(x, fft_lengths):\n y = fft(x, xla_client.FftType.FFT, fft_lengths)\n n = fft_lengths[-1]\n return y[..., : n//2 + 1]\n\n@partial(jit, static_argnums=1)\ndef _rfft_transpose(t, fft_lengths):\n # The transpose of RFFT can't be expressed only in terms of irfft. Instead of\n # manually building up larger twiddle matrices (which would increase the\n # asymptotic complexity and is also rather complicated), we rely JAX to\n # transpose a naive RFFT implementation.\n dummy_shape = t.shape[:-len(fft_lengths)] + fft_lengths\n dummy_primal = ShapeDtypeStruct(dummy_shape, _real_dtype(t.dtype))\n transpose = linear_transpose(\n partial(_naive_rfft, fft_lengths=fft_lengths), dummy_primal)\n result, = transpose(t)\n assert result.dtype == _real_dtype(t.dtype), (result.dtype, t.dtype)\n return result\n\ndef _irfft_transpose(t, fft_lengths):\n # The transpose of IRFFT is the RFFT of the cotangent times a scaling\n # factor and a mask. 
The mask scales the cotangent for the Hermitian\n # symmetric components of the RFFT by a factor of two, since these components\n # are de-duplicated in the RFFT.\n x = fft(t, xla_client.FftType.RFFT, fft_lengths)\n n = x.shape[-1]\n is_odd = fft_lengths[-1] % 2\n full = partial(lax.full_like, t, dtype=t.dtype)\n mask = lax.concatenate(\n [full(1.0, shape=(1,)),\n full(2.0, shape=(n - 2 + is_odd,)),\n full(1.0, shape=(1 - is_odd,))],\n dimension=0)\n scale = 1 / prod(fft_lengths)\n out = scale * mask * x\n assert out.dtype == _complex_dtype(t.dtype), (out.dtype, t.dtype)\n # Use JAX's convention for complex gradients\n # https://github.com/google/jax/issues/6223#issuecomment-807740707\n return lax.conj(out)\n\ndef fft_transpose_rule(t, operand, fft_type, fft_lengths):\n if fft_type == xla_client.FftType.RFFT:\n result = _rfft_transpose(t, fft_lengths)\n elif fft_type == xla_client.FftType.IRFFT:\n result = _irfft_transpose(t, fft_lengths)\n else:\n result = fft(t, fft_type, fft_lengths)\n return result,\n\ndef fft_batching_rule(batched_args, batch_dims, fft_type, fft_lengths):\n x, = batched_args\n bd, = batch_dims\n x = batching.moveaxis(x, bd, 0)\n return fft(x, fft_type, fft_lengths), 0\n\nfft_p = Primitive('fft')\nfft_p.def_impl(fft_impl)\nfft_p.def_abstract_eval(fft_abstract_eval)\nxla.translations[fft_p] = fft_translation_rule\nad.deflinear2(fft_p, fft_transpose_rule)\nbatching.primitive_batchers[fft_p] = fft_batching_rule\nif pocketfft:\n xla.backend_specific_translations['cpu'][fft_p] = pocketfft.pocketfft\n", "path": "jax/_src/lax/fft.py"}]}
| 2,807 | 185 |
gh_patches_debug_15031
|
rasdani/github-patches
|
git_diff
|
cloud-custodian__cloud-custodian-6989
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mugc ignored Global Services policy with region and did not remove policy from other regions
**Describe the bug**
A clear and concise description of what the bug is.
Following the documentation at `https://cloudcustodian.io/docs/aws/examples/accountservicelimit.html`, we added a region to one of our policies. After redeploying, the pipeline's c7n-org step updated the policy in the specified us-east-1 region, but the mugc step did not remove it from us-east-2.
**To Reproduce**
Steps to reproduce the behavior:
deploy policy for resource s3 in 2 regions: us-east-1 and us-east-2
add region: us-east-1 to the policy
deploy using c7n-org
run mugc
**Expected behavior**
A clear and concise description of what you expected to happen.
I would expect mugc to remove policy from all other regions
**Background (please complete the following information):**
- Python Version: [e.g. python 3.8.1] virtual environment CPython3.8.3.final.0-64
- Custodian Version: [e.g. 0.8.46.1] c7n 0.9.6, c7n-mailer 0.6.5, c7n-org 0.6.5
- Tool Version: [if applicable] codebuild pipeline
- Cloud Provider: [e.g. gcp, aws, azure] aws
- Policy: [please exclude any account/sensitive information]
```yaml
policies:
  - name: list-buckets
    resource: s3
    region: us-east-1
```
**Additional context**
Add any other context about the problem here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/ops/mugc.py`
Content:
```
1 # Copyright The Cloud Custodian Authors.
2 # SPDX-License-Identifier: Apache-2.0
3 import argparse
4 import itertools
5 import json
6 import os
7 import re
8 import logging
9 import sys
10
11 from c7n.credentials import SessionFactory
12 from c7n.config import Config
13 from c7n.policy import load as policy_load, PolicyCollection
14 from c7n import mu
15
16 # TODO: mugc has alot of aws assumptions
17
18 from c7n.resources.aws import AWS
19 from botocore.exceptions import ClientError
20
21
22 log = logging.getLogger('mugc')
23
24
25 def load_policies(options, config):
26 policies = PolicyCollection([], config)
27 for f in options.config_files:
28 policies += policy_load(config, f).filter(options.policy_filter)
29 return policies
30
31
32 def region_gc(options, region, policy_config, policies):
33
34 log.debug("Region:%s Starting garbage collection", region)
35 session_factory = SessionFactory(
36 region=region,
37 assume_role=policy_config.assume_role,
38 profile=policy_config.profile,
39 external_id=policy_config.external_id)
40
41 manager = mu.LambdaManager(session_factory)
42 funcs = list(manager.list_functions(options.prefix))
43 client = session_factory().client('lambda')
44
45 remove = []
46 current_policies = [p.name for p in policies]
47 pattern = re.compile(options.policy_regex)
48 for f in funcs:
49 if not pattern.match(f['FunctionName']):
50 continue
51 match = False
52 for pn in current_policies:
53 if f['FunctionName'].endswith(pn):
54 match = True
55 if options.present:
56 if match:
57 remove.append(f)
58 elif not match:
59 remove.append(f)
60
61 for n in remove:
62 events = []
63 try:
64 result = client.get_policy(FunctionName=n['FunctionName'])
65 except ClientError as e:
66 if e.response['Error']['Code'] == 'ResourceNotFoundException':
67 log.warning(
68 "Region:%s Lambda Function or Access Policy Statement missing: %s",
69 region, n['FunctionName'])
70 else:
71 log.warning(
72 "Region:%s Unexpected error: %s for function %s",
73 region, e, n['FunctionName'])
74
75 # Continue on with next function instead of raising an exception
76 continue
77
78 if 'Policy' not in result:
79 pass
80 else:
81 p = json.loads(result['Policy'])
82 for s in p['Statement']:
83 principal = s.get('Principal')
84 if not isinstance(principal, dict):
85 log.info("Skipping function %s" % n['FunctionName'])
86 continue
87 if principal == {'Service': 'events.amazonaws.com'}:
88 events.append(
89 mu.CloudWatchEventSource({}, session_factory))
90 elif principal == {'Service': 'config.amazonaws.com'}:
91 events.append(
92 mu.ConfigRule({}, session_factory))
93
94 f = mu.LambdaFunction({
95 'name': n['FunctionName'],
96 'role': n['Role'],
97 'handler': n['Handler'],
98 'timeout': n['Timeout'],
99 'memory_size': n['MemorySize'],
100 'description': n['Description'],
101 'runtime': n['Runtime'],
102 'events': events}, None)
103
104 log.info("Region:%s Removing %s", region, n['FunctionName'])
105 if options.dryrun:
106 log.info("Dryrun skipping removal")
107 continue
108 manager.remove(f)
109 log.info("Region:%s Removed %s", region, n['FunctionName'])
110
111
112 def resources_gc_prefix(options, policy_config, policy_collection):
113 """Garbage collect old custodian policies based on prefix.
114
115 We attempt to introspect to find the event sources for a policy
116 but without the old configuration this is implicit.
117 """
118
119 # Classify policies by region
120 policy_regions = {}
121 for p in policy_collection:
122 if p.execution_mode == 'poll':
123 continue
124 policy_regions.setdefault(p.options.region, []).append(p)
125
126 regions = get_gc_regions(options.regions, policy_config)
127 for r in regions:
128 region_gc(options, r, policy_config, policy_regions.get(r, []))
129
130
131 def get_gc_regions(regions, policy_config):
132 if 'all' in regions:
133 session_factory = SessionFactory(
134 region='us-east-1',
135 assume_role=policy_config.assume_role,
136 profile=policy_config.profile,
137 external_id=policy_config.external_id)
138
139 client = session_factory().client('ec2')
140 return [region['RegionName'] for region in client.describe_regions()['Regions']]
141 return regions
142
143
144 def setup_parser():
145 parser = argparse.ArgumentParser()
146 parser.add_argument("configs", nargs='*', help="Policy configuration file(s)")
147 parser.add_argument(
148 '-c', '--config', dest="config_files", nargs="*", action='append',
149 help="Policy configuration files(s)", default=[])
150 parser.add_argument(
151 "--present", action="store_true", default=False,
152 help='Target policies present in config files for removal instead of skipping them.')
153 parser.add_argument(
154 '-r', '--region', action='append', dest='regions', metavar='REGION',
155 help="AWS Region to target. Can be used multiple times, also supports `all`")
156 parser.add_argument('--dryrun', action="store_true", default=False)
157 parser.add_argument(
158 "--profile", default=os.environ.get('AWS_PROFILE'),
159 help="AWS Account Config File Profile to utilize")
160 parser.add_argument(
161 "--prefix", default="custodian-",
162 help="The Lambda name prefix to use for clean-up")
163 parser.add_argument(
164 "--policy-regex",
165 help="The policy must match the regex")
166 parser.add_argument("-p", "--policies", default=None, dest='policy_filter',
167 help="Only use named/matched policies")
168 parser.add_argument(
169 "--assume", default=None, dest="assume_role",
170 help="Role to assume")
171 parser.add_argument(
172 "-v", dest="verbose", action="store_true", default=False,
173 help='toggle verbose logging')
174 return parser
175
176
177 def main():
178 parser = setup_parser()
179 options = parser.parse_args()
180
181 log_level = logging.INFO
182 if options.verbose:
183 log_level = logging.DEBUG
184 logging.basicConfig(
185 level=log_level,
186 format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
187 logging.getLogger('botocore').setLevel(logging.ERROR)
188 logging.getLogger('urllib3').setLevel(logging.ERROR)
189 logging.getLogger('c7n.cache').setLevel(logging.WARNING)
190
191 if not options.policy_regex:
192 options.policy_regex = f"^{options.prefix}.*"
193
194 if not options.regions:
195 options.regions = [os.environ.get('AWS_DEFAULT_REGION', 'us-east-1')]
196
197 files = []
198 files.extend(itertools.chain(*options.config_files))
199 files.extend(options.configs)
200 options.config_files = files
201
202 if not files:
203 parser.print_help()
204 sys.exit(1)
205
206 policy_config = Config.empty(
207 regions=options.regions,
208 profile=options.profile,
209 assume_role=options.assume_role)
210
211 # use cloud provider to initialize policies to get region expansion
212 policies = AWS().initialize_policies(
213 PolicyCollection([
214 p for p in load_policies(
215 options, policy_config)
216 if p.provider_name == 'aws'],
217 policy_config),
218 policy_config)
219
220 resources_gc_prefix(options, policy_config, policies)
221
222
223 if __name__ == '__main__':
224 main()
225
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tools/ops/mugc.py b/tools/ops/mugc.py
--- a/tools/ops/mugc.py
+++ b/tools/ops/mugc.py
@@ -43,15 +43,15 @@
client = session_factory().client('lambda')
remove = []
- current_policies = [p.name for p in policies]
pattern = re.compile(options.policy_regex)
for f in funcs:
if not pattern.match(f['FunctionName']):
continue
match = False
- for pn in current_policies:
- if f['FunctionName'].endswith(pn):
- match = True
+ for p in policies:
+ if f['FunctionName'].endswith(p.name):
+ if 'region' not in p.data or p.data['region'] == region:
+ match = True
if options.present:
if match:
remove.append(f)
|
{"golden_diff": "diff --git a/tools/ops/mugc.py b/tools/ops/mugc.py\n--- a/tools/ops/mugc.py\n+++ b/tools/ops/mugc.py\n@@ -43,15 +43,15 @@\n client = session_factory().client('lambda')\n \n remove = []\n- current_policies = [p.name for p in policies]\n pattern = re.compile(options.policy_regex)\n for f in funcs:\n if not pattern.match(f['FunctionName']):\n continue\n match = False\n- for pn in current_policies:\n- if f['FunctionName'].endswith(pn):\n- match = True\n+ for p in policies:\n+ if f['FunctionName'].endswith(p.name):\n+ if 'region' not in p.data or p.data['region'] == region:\n+ match = True\n if options.present:\n if match:\n remove.append(f)\n", "issue": "Mugc ignored Global Services policy with region and did not remove policy from other regions\n**Describe the bug**\r\nA clear and concise description of what the bug is.\r\nAccording to documentation `https://cloudcustodian.io/docs/aws/examples/accountservicelimit.html` added region to one of our policies. Did redeploy and pipeline c7n-org step did update policy in specified us-east-1 region, but mugc step did not remove from us-east-2\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\ndeploy policy for resource s3 in 2 regions: us-east-1 and us-east-2\r\nadd region: us-east-1 to the policy\r\ndeploy using c7n-org\r\nrun mugc\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\nI would expect mugc to remove policy from all other regions\r\n\r\n**Background (please complete the following information):**\r\n - Python Version: [e.g. python 3.8.1] virtual environment CPython3.8.3.final.0-64\r\n - Custodian Version: [e.g. 0.8.46.1] c7n 0.9.6, c7n-mailer 0.6.5, c7n-org 0.6.5\r\n - Tool Version: [if applicable] codebuild pipeline\r\n - Cloud Provider: [e.g. 
gcp, aws, azure] aws\r\n - Policy: [please exclude any account/sensitive information]\r\n```yaml\r\npolicies: \r\n - name: list-buckets\r\n resource: s3\r\n region: us-east-1\r\n```\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "# Copyright The Cloud Custodian Authors.\n# SPDX-License-Identifier: Apache-2.0\nimport argparse\nimport itertools\nimport json\nimport os\nimport re\nimport logging\nimport sys\n\nfrom c7n.credentials import SessionFactory\nfrom c7n.config import Config\nfrom c7n.policy import load as policy_load, PolicyCollection\nfrom c7n import mu\n\n# TODO: mugc has alot of aws assumptions\n\nfrom c7n.resources.aws import AWS\nfrom botocore.exceptions import ClientError\n\n\nlog = logging.getLogger('mugc')\n\n\ndef load_policies(options, config):\n policies = PolicyCollection([], config)\n for f in options.config_files:\n policies += policy_load(config, f).filter(options.policy_filter)\n return policies\n\n\ndef region_gc(options, region, policy_config, policies):\n\n log.debug(\"Region:%s Starting garbage collection\", region)\n session_factory = SessionFactory(\n region=region,\n assume_role=policy_config.assume_role,\n profile=policy_config.profile,\n external_id=policy_config.external_id)\n\n manager = mu.LambdaManager(session_factory)\n funcs = list(manager.list_functions(options.prefix))\n client = session_factory().client('lambda')\n\n remove = []\n current_policies = [p.name for p in policies]\n pattern = re.compile(options.policy_regex)\n for f in funcs:\n if not pattern.match(f['FunctionName']):\n continue\n match = False\n for pn in current_policies:\n if f['FunctionName'].endswith(pn):\n match = True\n if options.present:\n if match:\n remove.append(f)\n elif not match:\n remove.append(f)\n\n for n in remove:\n events = []\n try:\n result = client.get_policy(FunctionName=n['FunctionName'])\n except ClientError as e:\n if e.response['Error']['Code'] == 'ResourceNotFoundException':\n log.warning(\n \"Region:%s Lambda Function or Access Policy Statement missing: %s\",\n region, n['FunctionName'])\n else:\n log.warning(\n \"Region:%s Unexpected error: %s for function %s\",\n region, e, n['FunctionName'])\n\n # Continue on with next function instead of raising an exception\n continue\n\n if 'Policy' not in result:\n pass\n else:\n p = json.loads(result['Policy'])\n for s in p['Statement']:\n principal = s.get('Principal')\n if not isinstance(principal, dict):\n log.info(\"Skipping function %s\" % n['FunctionName'])\n continue\n if principal == {'Service': 'events.amazonaws.com'}:\n events.append(\n mu.CloudWatchEventSource({}, session_factory))\n elif principal == {'Service': 'config.amazonaws.com'}:\n events.append(\n mu.ConfigRule({}, session_factory))\n\n f = mu.LambdaFunction({\n 'name': n['FunctionName'],\n 'role': n['Role'],\n 'handler': n['Handler'],\n 'timeout': n['Timeout'],\n 'memory_size': n['MemorySize'],\n 'description': n['Description'],\n 'runtime': n['Runtime'],\n 'events': events}, None)\n\n log.info(\"Region:%s Removing %s\", region, n['FunctionName'])\n if options.dryrun:\n log.info(\"Dryrun skipping removal\")\n continue\n manager.remove(f)\n log.info(\"Region:%s Removed %s\", region, n['FunctionName'])\n\n\ndef resources_gc_prefix(options, policy_config, policy_collection):\n \"\"\"Garbage collect old custodian policies based on prefix.\n\n We attempt to introspect to find the event sources for a policy\n but without the old configuration this is implicit.\n \"\"\"\n\n # Classify 
policies by region\n policy_regions = {}\n for p in policy_collection:\n if p.execution_mode == 'poll':\n continue\n policy_regions.setdefault(p.options.region, []).append(p)\n\n regions = get_gc_regions(options.regions, policy_config)\n for r in regions:\n region_gc(options, r, policy_config, policy_regions.get(r, []))\n\n\ndef get_gc_regions(regions, policy_config):\n if 'all' in regions:\n session_factory = SessionFactory(\n region='us-east-1',\n assume_role=policy_config.assume_role,\n profile=policy_config.profile,\n external_id=policy_config.external_id)\n\n client = session_factory().client('ec2')\n return [region['RegionName'] for region in client.describe_regions()['Regions']]\n return regions\n\n\ndef setup_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"configs\", nargs='*', help=\"Policy configuration file(s)\")\n parser.add_argument(\n '-c', '--config', dest=\"config_files\", nargs=\"*\", action='append',\n help=\"Policy configuration files(s)\", default=[])\n parser.add_argument(\n \"--present\", action=\"store_true\", default=False,\n help='Target policies present in config files for removal instead of skipping them.')\n parser.add_argument(\n '-r', '--region', action='append', dest='regions', metavar='REGION',\n help=\"AWS Region to target. Can be used multiple times, also supports `all`\")\n parser.add_argument('--dryrun', action=\"store_true\", default=False)\n parser.add_argument(\n \"--profile\", default=os.environ.get('AWS_PROFILE'),\n help=\"AWS Account Config File Profile to utilize\")\n parser.add_argument(\n \"--prefix\", default=\"custodian-\",\n help=\"The Lambda name prefix to use for clean-up\")\n parser.add_argument(\n \"--policy-regex\",\n help=\"The policy must match the regex\")\n parser.add_argument(\"-p\", \"--policies\", default=None, dest='policy_filter',\n help=\"Only use named/matched policies\")\n parser.add_argument(\n \"--assume\", default=None, dest=\"assume_role\",\n help=\"Role to assume\")\n parser.add_argument(\n \"-v\", dest=\"verbose\", action=\"store_true\", default=False,\n help='toggle verbose logging')\n return parser\n\n\ndef main():\n parser = setup_parser()\n options = parser.parse_args()\n\n log_level = logging.INFO\n if options.verbose:\n log_level = logging.DEBUG\n logging.basicConfig(\n level=log_level,\n format=\"%(asctime)s: %(name)s:%(levelname)s %(message)s\")\n logging.getLogger('botocore').setLevel(logging.ERROR)\n logging.getLogger('urllib3').setLevel(logging.ERROR)\n logging.getLogger('c7n.cache').setLevel(logging.WARNING)\n\n if not options.policy_regex:\n options.policy_regex = f\"^{options.prefix}.*\"\n\n if not options.regions:\n options.regions = [os.environ.get('AWS_DEFAULT_REGION', 'us-east-1')]\n\n files = []\n files.extend(itertools.chain(*options.config_files))\n files.extend(options.configs)\n options.config_files = files\n\n if not files:\n parser.print_help()\n sys.exit(1)\n\n policy_config = Config.empty(\n regions=options.regions,\n profile=options.profile,\n assume_role=options.assume_role)\n\n # use cloud provider to initialize policies to get region expansion\n policies = AWS().initialize_policies(\n PolicyCollection([\n p for p in load_policies(\n options, policy_config)\n if p.provider_name == 'aws'],\n policy_config),\n policy_config)\n\n resources_gc_prefix(options, policy_config, policies)\n\n\nif __name__ == '__main__':\n main()\n", "path": "tools/ops/mugc.py"}], "after_files": [{"content": "# Copyright The Cloud Custodian Authors.\n# SPDX-License-Identifier: 
Apache-2.0\nimport argparse\nimport itertools\nimport json\nimport os\nimport re\nimport logging\nimport sys\n\nfrom c7n.credentials import SessionFactory\nfrom c7n.config import Config\nfrom c7n.policy import load as policy_load, PolicyCollection\nfrom c7n import mu\n\n# TODO: mugc has alot of aws assumptions\n\nfrom c7n.resources.aws import AWS\nfrom botocore.exceptions import ClientError\n\n\nlog = logging.getLogger('mugc')\n\n\ndef load_policies(options, config):\n policies = PolicyCollection([], config)\n for f in options.config_files:\n policies += policy_load(config, f).filter(options.policy_filter)\n return policies\n\n\ndef region_gc(options, region, policy_config, policies):\n\n log.debug(\"Region:%s Starting garbage collection\", region)\n session_factory = SessionFactory(\n region=region,\n assume_role=policy_config.assume_role,\n profile=policy_config.profile,\n external_id=policy_config.external_id)\n\n manager = mu.LambdaManager(session_factory)\n funcs = list(manager.list_functions(options.prefix))\n client = session_factory().client('lambda')\n\n remove = []\n pattern = re.compile(options.policy_regex)\n for f in funcs:\n if not pattern.match(f['FunctionName']):\n continue\n match = False\n for p in policies:\n if f['FunctionName'].endswith(p.name):\n if 'region' not in p.data or p.data['region'] == region:\n match = True\n if options.present:\n if match:\n remove.append(f)\n elif not match:\n remove.append(f)\n\n for n in remove:\n events = []\n try:\n result = client.get_policy(FunctionName=n['FunctionName'])\n except ClientError as e:\n if e.response['Error']['Code'] == 'ResourceNotFoundException':\n log.warning(\n \"Region:%s Lambda Function or Access Policy Statement missing: %s\",\n region, n['FunctionName'])\n else:\n log.warning(\n \"Region:%s Unexpected error: %s for function %s\",\n region, e, n['FunctionName'])\n\n # Continue on with next function instead of raising an exception\n continue\n\n if 'Policy' not in result:\n pass\n else:\n p = json.loads(result['Policy'])\n for s in p['Statement']:\n principal = s.get('Principal')\n if not isinstance(principal, dict):\n log.info(\"Skipping function %s\" % n['FunctionName'])\n continue\n if principal == {'Service': 'events.amazonaws.com'}:\n events.append(\n mu.CloudWatchEventSource({}, session_factory))\n elif principal == {'Service': 'config.amazonaws.com'}:\n events.append(\n mu.ConfigRule({}, session_factory))\n\n f = mu.LambdaFunction({\n 'name': n['FunctionName'],\n 'role': n['Role'],\n 'handler': n['Handler'],\n 'timeout': n['Timeout'],\n 'memory_size': n['MemorySize'],\n 'description': n['Description'],\n 'runtime': n['Runtime'],\n 'events': events}, None)\n\n log.info(\"Region:%s Removing %s\", region, n['FunctionName'])\n if options.dryrun:\n log.info(\"Dryrun skipping removal\")\n continue\n manager.remove(f)\n log.info(\"Region:%s Removed %s\", region, n['FunctionName'])\n\n\ndef resources_gc_prefix(options, policy_config, policy_collection):\n \"\"\"Garbage collect old custodian policies based on prefix.\n\n We attempt to introspect to find the event sources for a policy\n but without the old configuration this is implicit.\n \"\"\"\n\n # Classify policies by region\n policy_regions = {}\n for p in policy_collection:\n if p.execution_mode == 'poll':\n continue\n policy_regions.setdefault(p.options.region, []).append(p)\n\n regions = get_gc_regions(options.regions, policy_config)\n for r in regions:\n region_gc(options, r, policy_config, policy_regions.get(r, []))\n\n\ndef get_gc_regions(regions, 
policy_config):\n if 'all' in regions:\n session_factory = SessionFactory(\n region='us-east-1',\n assume_role=policy_config.assume_role,\n profile=policy_config.profile,\n external_id=policy_config.external_id)\n\n client = session_factory().client('ec2')\n return [region['RegionName'] for region in client.describe_regions()['Regions']]\n return regions\n\n\ndef setup_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"configs\", nargs='*', help=\"Policy configuration file(s)\")\n parser.add_argument(\n '-c', '--config', dest=\"config_files\", nargs=\"*\", action='append',\n help=\"Policy configuration files(s)\", default=[])\n parser.add_argument(\n \"--present\", action=\"store_true\", default=False,\n help='Target policies present in config files for removal instead of skipping them.')\n parser.add_argument(\n '-r', '--region', action='append', dest='regions', metavar='REGION',\n help=\"AWS Region to target. Can be used multiple times, also supports `all`\")\n parser.add_argument('--dryrun', action=\"store_true\", default=False)\n parser.add_argument(\n \"--profile\", default=os.environ.get('AWS_PROFILE'),\n help=\"AWS Account Config File Profile to utilize\")\n parser.add_argument(\n \"--prefix\", default=\"custodian-\",\n help=\"The Lambda name prefix to use for clean-up\")\n parser.add_argument(\n \"--policy-regex\",\n help=\"The policy must match the regex\")\n parser.add_argument(\"-p\", \"--policies\", default=None, dest='policy_filter',\n help=\"Only use named/matched policies\")\n parser.add_argument(\n \"--assume\", default=None, dest=\"assume_role\",\n help=\"Role to assume\")\n parser.add_argument(\n \"-v\", dest=\"verbose\", action=\"store_true\", default=False,\n help='toggle verbose logging')\n return parser\n\n\ndef main():\n parser = setup_parser()\n options = parser.parse_args()\n\n log_level = logging.INFO\n if options.verbose:\n log_level = logging.DEBUG\n logging.basicConfig(\n level=log_level,\n format=\"%(asctime)s: %(name)s:%(levelname)s %(message)s\")\n logging.getLogger('botocore').setLevel(logging.ERROR)\n logging.getLogger('urllib3').setLevel(logging.ERROR)\n logging.getLogger('c7n.cache').setLevel(logging.WARNING)\n\n if not options.policy_regex:\n options.policy_regex = f\"^{options.prefix}.*\"\n\n if not options.regions:\n options.regions = [os.environ.get('AWS_DEFAULT_REGION', 'us-east-1')]\n\n files = []\n files.extend(itertools.chain(*options.config_files))\n files.extend(options.configs)\n options.config_files = files\n\n if not files:\n parser.print_help()\n sys.exit(1)\n\n policy_config = Config.empty(\n regions=options.regions,\n profile=options.profile,\n assume_role=options.assume_role)\n\n # use cloud provider to initialize policies to get region expansion\n policies = AWS().initialize_policies(\n PolicyCollection([\n p for p in load_policies(\n options, policy_config)\n if p.provider_name == 'aws'],\n policy_config),\n policy_config)\n\n resources_gc_prefix(options, policy_config, policies)\n\n\nif __name__ == '__main__':\n main()\n", "path": "tools/ops/mugc.py"}]}
| 2,794 | 203 |
gh_patches_debug_25448
|
rasdani/github-patches
|
git_diff
|
yt-project__yt-2922
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fresh yt installation not importing
<!--To help us understand and resolve your issue, please fill out the form to
the best of your ability.-->
<!--You can feel free to delete the sections that do not apply.-->
### Bug report
**Bug summary**
yt fails to import on a fresh development installation. The error is `ModuleNotFoundError: No module named 'yt.utilities.lib.misc_utilities'`. Similar to [Issue 2685](https://github.com/yt-project/yt/issues/2685)
<!--A short 1-2 sentences that succinctly describes the bug-->
**Code for reproduction**
Installed using `pip install git+git://github.com/yt-project/yt.git`
<!--A minimum code snippet required to reproduce the bug, also minimizing the
number of dependencies required.-->
<!-- If you need to use a data file to trigger the issue you're having, consider
using one of the datasets from the yt data hub (http://yt-project.org/data). If
your issue cannot be triggered using a public dataset, you can use the yt
curldrop (https://docs.hub.yt/services.html#curldrop) to share data
files. Please include a link to the dataset in the issue if you use the
curldrop.-->
```python
import yt
```
**Actual outcome**
<!--The output produced by the above code, which may be a screenshot, console
output, etc.-->
```
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
<ipython-input-1-2d2292a375dc> in <module>
----> 1 import yt
~/.conda/envs/ytgit/lib/python3.8/site-packages/yt/__init__.py in <module>
63 )
64
---> 65 from yt.fields.api import (
66 field_plugins,
67 DerivedField,
~/.conda/envs/ytgit/lib/python3.8/site-packages/yt/fields/api.py in <module>
1 # from . import species_fields
----> 2 from . import (
3 angular_momentum,
4 astro_fields,
5 cosmology_fields,
~/.conda/envs/ytgit/lib/python3.8/site-packages/yt/fields/angular_momentum.py in <module>
1 import numpy as np
2
----> 3 from yt.utilities.lib.misc_utilities import (
4 obtain_position_vector,
5 obtain_relative_velocity_vector,
ModuleNotFoundError: No module named 'yt.utilities.lib.misc_utilities'
```
**Expected outcome**
<!--A description of the expected outcome from the code snippet-->
<!--If this used to work in an earlier version of yt, please note the
version it used to work on-->
**Version Information**
<!--Please specify your platform and versions of the relevant libraries you are
using:-->
* Operating System: CentOS Linux 7 (Core) x86_64
* Python Version: 3.8.5
* yt version: 4.0.dev0
* Other Libraries (if applicable): N/A
<!--Please tell us how you installed yt and python e.g., from source,
pip, conda. If you installed from conda, please specify which channel you used
if not the default-->
Installed using `pip install git+git://github.com/yt-project/yt.git`
Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import glob
2 import os
3 import sys
4 from distutils.ccompiler import get_default_compiler
5 from distutils.version import LooseVersion
6
7 import pkg_resources
8 from setuptools import find_packages, setup
9
10 from setupext import (
11 check_for_openmp,
12 check_for_pyembree,
13 create_build_ext,
14 install_ccompiler,
15 )
16
17 install_ccompiler()
18
19 try:
20 distribute_ver = LooseVersion(pkg_resources.get_distribution("distribute").version)
21 if distribute_ver < LooseVersion("0.7.3"):
22 print("Distribute is a legacy package obsoleted by setuptools.")
23 print("We strongly recommend that you just uninstall it.")
24 print("If for some reason you cannot do it, you'll need to upgrade it")
25 print("to latest version before proceeding:")
26 print(" pip install -U distribute")
27 sys.exit(1)
28 except pkg_resources.DistributionNotFound:
29 pass # yay!
30
31 VERSION = "4.0.dev0"
32
33 if os.path.exists("MANIFEST"):
34 os.remove("MANIFEST")
35
36 with open("README.md") as file:
37 long_description = file.read()
38
39 if check_for_openmp():
40 omp_args = ["-fopenmp"]
41 else:
42 omp_args = []
43
44 if os.name == "nt":
45 std_libs = []
46 else:
47 std_libs = ["m"]
48
49 if get_default_compiler() == "msvc":
50 CPP14_FLAG = ["/std:c++14"]
51 else:
52 CPP14_FLAG = ["--std=c++14"]
53
54 cythonize_aliases = {
55 "LIB_DIR": "yt/utilities/lib/",
56 "LIB_DIR_EWAH": ["yt/utilities/lib/", "yt/utilities/lib/ewahboolarray/"],
57 "LIB_DIR_GEOM": ["yt/utilities/lib/", "yt/geometry/"],
58 "LIB_DIR_GEOM_ARTIO": [
59 "yt/utilities/lib/",
60 "yt/geometry/",
61 "yt/frontends/artio/artio_headers/",
62 ],
63 "STD_LIBS": std_libs,
64 "OMP_ARGS": omp_args,
65 "FIXED_INTERP": "yt/utilities/lib/fixed_interpolator.cpp",
66 "ARTIO_SOURCE": glob.glob("yt/frontends/artio/artio_headers/*.c"),
67 "CPP14_FLAG": CPP14_FLAG,
68 }
69
70 lib_exts = [
71 "yt/geometry/*.pyx",
72 "yt/utilities/cython_fortran_utils.pyx",
73 "yt/frontends/ramses/io_utils.pyx",
74 "yt/utilities/lib/cykdtree/kdtree.pyx",
75 "yt/utilities/lib/cykdtree/utils.pyx",
76 "yt/frontends/artio/_artio_caller.pyx",
77 "yt/utilities/lib/*.pyx",
78 ]
79
80 embree_libs, embree_aliases = check_for_pyembree(std_libs)
81 cythonize_aliases.update(embree_aliases)
82 lib_exts += embree_libs
83
84 # This overrides using lib_exts, so it has to happen after lib_exts is fully defined
85 build_ext, sdist = create_build_ext(lib_exts, cythonize_aliases)
86
87 if __name__ == "__main__":
88 setup(
89 name="yt",
90 version=VERSION,
91 description="An analysis and visualization toolkit for volumetric data",
92 long_description=long_description,
93 long_description_content_type="text/markdown",
94 classifiers=[
95 "Development Status :: 5 - Production/Stable",
96 "Environment :: Console",
97 "Intended Audience :: Science/Research",
98 "License :: OSI Approved :: BSD License",
99 "Operating System :: MacOS :: MacOS X",
100 "Operating System :: POSIX :: AIX",
101 "Operating System :: POSIX :: Linux",
102 "Programming Language :: C",
103 "Programming Language :: Python :: 3",
104 "Programming Language :: Python :: 3.5",
105 "Programming Language :: Python :: 3.6",
106 "Programming Language :: Python :: 3.7",
107 "Programming Language :: Python :: 3.8",
108 "Topic :: Scientific/Engineering :: Astronomy",
109 "Topic :: Scientific/Engineering :: Physics",
110 "Topic :: Scientific/Engineering :: Visualization",
111 "Framework :: Matplotlib",
112 ],
113 keywords="astronomy astrophysics visualization " + "amr adaptivemeshrefinement",
114 entry_points={
115 "console_scripts": ["yt = yt.utilities.command_line:run_main",],
116 "nose.plugins.0.10": [
117 "answer-testing = yt.utilities.answer_testing.framework:AnswerTesting"
118 ],
119 },
120 packages=find_packages(),
121 include_package_data=True,
122 install_requires=[
123 "matplotlib>=1.5.3",
124 "setuptools>=19.6",
125 "sympy>=1.2",
126 "numpy>=1.10.4",
127 "IPython>=1.0",
128 "unyt>=2.7.2",
129 ],
130 extras_require={"hub": ["girder_client"], "mapserver": ["bottle"]},
131 cmdclass={"sdist": sdist, "build_ext": build_ext},
132 author="The yt project",
133 author_email="[email protected]",
134 url="https://github.com/yt-project/yt",
135 project_urls={
136 "Homepage": "https://yt-project.org/",
137 "Documentation": "https://yt-project.org/doc/",
138 "Source": "https://github.com/yt-project/yt/",
139 "Tracker": "https://github.com/yt-project/yt/issues",
140 },
141 license="BSD 3-Clause",
142 zip_safe=False,
143 scripts=["scripts/iyt"],
144 ext_modules=[], # !!! We override this inside build_ext above
145 python_requires=">=3.6",
146 )
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@
from distutils.version import LooseVersion
import pkg_resources
-from setuptools import find_packages, setup
+from setuptools import Distribution, find_packages, setup
from setupext import (
check_for_openmp,
@@ -84,6 +84,16 @@
# This overrides using lib_exts, so it has to happen after lib_exts is fully defined
build_ext, sdist = create_build_ext(lib_exts, cythonize_aliases)
+# Force setuptools to consider that there are ext modules, even if empty.
+# See https://github.com/yt-project/yt/issues/2922 and
+# https://stackoverflow.com/a/62668026/2601223 for the fix.
+class BinaryDistribution(Distribution):
+ """Distribution which always forces a binary package with platform name."""
+
+ def has_ext_modules(self):
+ return True
+
+
if __name__ == "__main__":
setup(
name="yt",
@@ -141,6 +151,7 @@
license="BSD 3-Clause",
zip_safe=False,
scripts=["scripts/iyt"],
+ distclass=BinaryDistribution,
ext_modules=[], # !!! We override this inside build_ext above
python_requires=">=3.6",
)
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -5,7 +5,7 @@\n from distutils.version import LooseVersion\n \n import pkg_resources\n-from setuptools import find_packages, setup\n+from setuptools import Distribution, find_packages, setup\n \n from setupext import (\n check_for_openmp,\n@@ -84,6 +84,16 @@\n # This overrides using lib_exts, so it has to happen after lib_exts is fully defined\n build_ext, sdist = create_build_ext(lib_exts, cythonize_aliases)\n \n+# Force setuptools to consider that there are ext modules, even if empty.\n+# See https://github.com/yt-project/yt/issues/2922 and\n+# https://stackoverflow.com/a/62668026/2601223 for the fix.\n+class BinaryDistribution(Distribution):\n+ \"\"\"Distribution which always forces a binary package with platform name.\"\"\"\n+\n+ def has_ext_modules(self):\n+ return True\n+\n+\n if __name__ == \"__main__\":\n setup(\n name=\"yt\",\n@@ -141,6 +151,7 @@\n license=\"BSD 3-Clause\",\n zip_safe=False,\n scripts=[\"scripts/iyt\"],\n+ distclass=BinaryDistribution,\n ext_modules=[], # !!! We override this inside build_ext above\n python_requires=\">=3.6\",\n )\n", "issue": "Fresh yt installation not importing\n<!--To help us understand and resolve your issue, please fill out the form to\r\nthe best of your ability.-->\r\n<!--You can feel free to delete the sections that do not apply.-->\r\n\r\n### Bug report\r\n\r\n**Bug summary**\r\n\r\nyt fails to import on a fresh development installation. The error is that `ModuleNotFoundError: No module named 'yt.utilities.lib.misc_utilities'` Similar to [Issue 2685](https://github.com/yt-project/yt/issues/2685)\r\n<!--A short 1-2 sentences that succinctly describes the bug-->\r\n\r\n**Code for reproduction**\r\nInstalled using `pip install git+git://github.com/yt-project/yt.git`\r\n<!--A minimum code snippet required to reproduce the bug, also minimizing the\r\nnumber of dependencies required.-->\r\n\r\n<!-- If you need to use a data file to trigger the issue you're having, consider\r\nusing one of the datasets from the yt data hub (http://yt-project.org/data). If\r\nyour issue cannot be triggered using a public dataset, you can use the yt\r\ncurldrop (https://docs.hub.yt/services.html#curldrop) to share data\r\nfiles. Please include a link to the dataset in the issue if you use the\r\ncurldrop.-->\r\n\r\n```python\r\nimport yt\r\n```\r\n\r\n**Actual outcome**\r\n\r\n<!--The output produced by the above code, which may be a screenshot, console\r\noutput, etc.-->\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nModuleNotFoundError Traceback (most recent call last)\r\n<ipython-input-1-2d2292a375dc> in <module>\r\n----> 1 import yt\r\n\r\n~/.conda/envs/ytgit/lib/python3.8/site-packages/yt/__init__.py in <module>\r\n 63 )\r\n 64 \r\n---> 65 from yt.fields.api import (\r\n 66 field_plugins,\r\n 67 DerivedField,\r\n\r\n~/.conda/envs/ytgit/lib/python3.8/site-packages/yt/fields/api.py in <module>\r\n 1 # from . import species_fields\r\n----> 2 from . 
import (\r\n 3 angular_momentum,\r\n 4 astro_fields,\r\n 5 cosmology_fields,\r\n\r\n~/.conda/envs/ytgit/lib/python3.8/site-packages/yt/fields/angular_momentum.py in <module>\r\n 1 import numpy as np\r\n 2 \r\n----> 3 from yt.utilities.lib.misc_utilities import (\r\n 4 obtain_position_vector,\r\n 5 obtain_relative_velocity_vector,\r\n\r\nModuleNotFoundError: No module named 'yt.utilities.lib.misc_utilities'\r\n```\r\n\r\n**Expected outcome**\r\n\r\n<!--A description of the expected outcome from the code snippet-->\r\n<!--If this used to work in an earlier version of yt, please note the\r\nversion it used to work on-->\r\n\r\n**Version Information**\r\n<!--Please specify your platform and versions of the relevant libraries you are\r\nusing:-->\r\n * Operating System: CentOS Linux 7 (Core)x86_64\r\n * Python Version: 3.8.5\r\n * yt version: 4.0.dev0\r\n * Other Libraries (if applicable): N/A\r\n\r\n<!--Please tell us how you installed yt and python e.g., from source,\r\npip, conda. If you installed from conda, please specify which channel you used\r\nif not the default-->\r\n\r\nInstalled using `pip install git+git://github.com/yt-project/yt.git`\r\n\r\n\r\nThanks!\n", "before_files": [{"content": "import glob\nimport os\nimport sys\nfrom distutils.ccompiler import get_default_compiler\nfrom distutils.version import LooseVersion\n\nimport pkg_resources\nfrom setuptools import find_packages, setup\n\nfrom setupext import (\n check_for_openmp,\n check_for_pyembree,\n create_build_ext,\n install_ccompiler,\n)\n\ninstall_ccompiler()\n\ntry:\n distribute_ver = LooseVersion(pkg_resources.get_distribution(\"distribute\").version)\n if distribute_ver < LooseVersion(\"0.7.3\"):\n print(\"Distribute is a legacy package obsoleted by setuptools.\")\n print(\"We strongly recommend that you just uninstall it.\")\n print(\"If for some reason you cannot do it, you'll need to upgrade it\")\n print(\"to latest version before proceeding:\")\n print(\" pip install -U distribute\")\n sys.exit(1)\nexcept pkg_resources.DistributionNotFound:\n pass # yay!\n\nVERSION = \"4.0.dev0\"\n\nif os.path.exists(\"MANIFEST\"):\n os.remove(\"MANIFEST\")\n\nwith open(\"README.md\") as file:\n long_description = file.read()\n\nif check_for_openmp():\n omp_args = [\"-fopenmp\"]\nelse:\n omp_args = []\n\nif os.name == \"nt\":\n std_libs = []\nelse:\n std_libs = [\"m\"]\n\nif get_default_compiler() == \"msvc\":\n CPP14_FLAG = [\"/std:c++14\"]\nelse:\n CPP14_FLAG = [\"--std=c++14\"]\n\ncythonize_aliases = {\n \"LIB_DIR\": \"yt/utilities/lib/\",\n \"LIB_DIR_EWAH\": [\"yt/utilities/lib/\", \"yt/utilities/lib/ewahboolarray/\"],\n \"LIB_DIR_GEOM\": [\"yt/utilities/lib/\", \"yt/geometry/\"],\n \"LIB_DIR_GEOM_ARTIO\": [\n \"yt/utilities/lib/\",\n \"yt/geometry/\",\n \"yt/frontends/artio/artio_headers/\",\n ],\n \"STD_LIBS\": std_libs,\n \"OMP_ARGS\": omp_args,\n \"FIXED_INTERP\": \"yt/utilities/lib/fixed_interpolator.cpp\",\n \"ARTIO_SOURCE\": glob.glob(\"yt/frontends/artio/artio_headers/*.c\"),\n \"CPP14_FLAG\": CPP14_FLAG,\n}\n\nlib_exts = [\n \"yt/geometry/*.pyx\",\n \"yt/utilities/cython_fortran_utils.pyx\",\n \"yt/frontends/ramses/io_utils.pyx\",\n \"yt/utilities/lib/cykdtree/kdtree.pyx\",\n \"yt/utilities/lib/cykdtree/utils.pyx\",\n \"yt/frontends/artio/_artio_caller.pyx\",\n \"yt/utilities/lib/*.pyx\",\n]\n\nembree_libs, embree_aliases = check_for_pyembree(std_libs)\ncythonize_aliases.update(embree_aliases)\nlib_exts += embree_libs\n\n# This overrides using lib_exts, so it has to happen after lib_exts is fully defined\nbuild_ext, 
sdist = create_build_ext(lib_exts, cythonize_aliases)\n\nif __name__ == \"__main__\":\n setup(\n name=\"yt\",\n version=VERSION,\n description=\"An analysis and visualization toolkit for volumetric data\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: AIX\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: C\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering :: Astronomy\",\n \"Topic :: Scientific/Engineering :: Physics\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Framework :: Matplotlib\",\n ],\n keywords=\"astronomy astrophysics visualization \" + \"amr adaptivemeshrefinement\",\n entry_points={\n \"console_scripts\": [\"yt = yt.utilities.command_line:run_main\",],\n \"nose.plugins.0.10\": [\n \"answer-testing = yt.utilities.answer_testing.framework:AnswerTesting\"\n ],\n },\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n \"matplotlib>=1.5.3\",\n \"setuptools>=19.6\",\n \"sympy>=1.2\",\n \"numpy>=1.10.4\",\n \"IPython>=1.0\",\n \"unyt>=2.7.2\",\n ],\n extras_require={\"hub\": [\"girder_client\"], \"mapserver\": [\"bottle\"]},\n cmdclass={\"sdist\": sdist, \"build_ext\": build_ext},\n author=\"The yt project\",\n author_email=\"[email protected]\",\n url=\"https://github.com/yt-project/yt\",\n project_urls={\n \"Homepage\": \"https://yt-project.org/\",\n \"Documentation\": \"https://yt-project.org/doc/\",\n \"Source\": \"https://github.com/yt-project/yt/\",\n \"Tracker\": \"https://github.com/yt-project/yt/issues\",\n },\n license=\"BSD 3-Clause\",\n zip_safe=False,\n scripts=[\"scripts/iyt\"],\n ext_modules=[], # !!! 
We override this inside build_ext above\n python_requires=\">=3.6\",\n )\n", "path": "setup.py"}], "after_files": [{"content": "import glob\nimport os\nimport sys\nfrom distutils.ccompiler import get_default_compiler\nfrom distutils.version import LooseVersion\n\nimport pkg_resources\nfrom setuptools import Distribution, find_packages, setup\n\nfrom setupext import (\n check_for_openmp,\n check_for_pyembree,\n create_build_ext,\n install_ccompiler,\n)\n\ninstall_ccompiler()\n\ntry:\n distribute_ver = LooseVersion(pkg_resources.get_distribution(\"distribute\").version)\n if distribute_ver < LooseVersion(\"0.7.3\"):\n print(\"Distribute is a legacy package obsoleted by setuptools.\")\n print(\"We strongly recommend that you just uninstall it.\")\n print(\"If for some reason you cannot do it, you'll need to upgrade it\")\n print(\"to latest version before proceeding:\")\n print(\" pip install -U distribute\")\n sys.exit(1)\nexcept pkg_resources.DistributionNotFound:\n pass # yay!\n\nVERSION = \"4.0.dev0\"\n\nif os.path.exists(\"MANIFEST\"):\n os.remove(\"MANIFEST\")\n\nwith open(\"README.md\") as file:\n long_description = file.read()\n\nif check_for_openmp():\n omp_args = [\"-fopenmp\"]\nelse:\n omp_args = []\n\nif os.name == \"nt\":\n std_libs = []\nelse:\n std_libs = [\"m\"]\n\nif get_default_compiler() == \"msvc\":\n CPP14_FLAG = [\"/std:c++14\"]\nelse:\n CPP14_FLAG = [\"--std=c++14\"]\n\ncythonize_aliases = {\n \"LIB_DIR\": \"yt/utilities/lib/\",\n \"LIB_DIR_EWAH\": [\"yt/utilities/lib/\", \"yt/utilities/lib/ewahboolarray/\"],\n \"LIB_DIR_GEOM\": [\"yt/utilities/lib/\", \"yt/geometry/\"],\n \"LIB_DIR_GEOM_ARTIO\": [\n \"yt/utilities/lib/\",\n \"yt/geometry/\",\n \"yt/frontends/artio/artio_headers/\",\n ],\n \"STD_LIBS\": std_libs,\n \"OMP_ARGS\": omp_args,\n \"FIXED_INTERP\": \"yt/utilities/lib/fixed_interpolator.cpp\",\n \"ARTIO_SOURCE\": glob.glob(\"yt/frontends/artio/artio_headers/*.c\"),\n \"CPP14_FLAG\": CPP14_FLAG,\n}\n\nlib_exts = [\n \"yt/geometry/*.pyx\",\n \"yt/utilities/cython_fortran_utils.pyx\",\n \"yt/frontends/ramses/io_utils.pyx\",\n \"yt/utilities/lib/cykdtree/kdtree.pyx\",\n \"yt/utilities/lib/cykdtree/utils.pyx\",\n \"yt/frontends/artio/_artio_caller.pyx\",\n \"yt/utilities/lib/*.pyx\",\n]\n\nembree_libs, embree_aliases = check_for_pyembree(std_libs)\ncythonize_aliases.update(embree_aliases)\nlib_exts += embree_libs\n\n# This overrides using lib_exts, so it has to happen after lib_exts is fully defined\nbuild_ext, sdist = create_build_ext(lib_exts, cythonize_aliases)\n\n# Force setuptools to consider that there are ext modules, even if empty.\n# See https://github.com/yt-project/yt/issues/2922 and\n# https://stackoverflow.com/a/62668026/2601223 for the fix.\nclass BinaryDistribution(Distribution):\n \"\"\"Distribution which always forces a binary package with platform name.\"\"\"\n\n def has_ext_modules(self):\n return True\n\n\nif __name__ == \"__main__\":\n setup(\n name=\"yt\",\n version=VERSION,\n description=\"An analysis and visualization toolkit for volumetric data\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: AIX\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: C\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language 
:: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering :: Astronomy\",\n \"Topic :: Scientific/Engineering :: Physics\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Framework :: Matplotlib\",\n ],\n keywords=\"astronomy astrophysics visualization \" + \"amr adaptivemeshrefinement\",\n entry_points={\n \"console_scripts\": [\"yt = yt.utilities.command_line:run_main\",],\n \"nose.plugins.0.10\": [\n \"answer-testing = yt.utilities.answer_testing.framework:AnswerTesting\"\n ],\n },\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n \"matplotlib>=1.5.3\",\n \"setuptools>=19.6\",\n \"sympy>=1.2\",\n \"numpy>=1.10.4\",\n \"IPython>=1.0\",\n \"unyt>=2.7.2\",\n ],\n extras_require={\"hub\": [\"girder_client\"], \"mapserver\": [\"bottle\"]},\n cmdclass={\"sdist\": sdist, \"build_ext\": build_ext},\n author=\"The yt project\",\n author_email=\"[email protected]\",\n url=\"https://github.com/yt-project/yt\",\n project_urls={\n \"Homepage\": \"https://yt-project.org/\",\n \"Documentation\": \"https://yt-project.org/doc/\",\n \"Source\": \"https://github.com/yt-project/yt/\",\n \"Tracker\": \"https://github.com/yt-project/yt/issues\",\n },\n license=\"BSD 3-Clause\",\n zip_safe=False,\n scripts=[\"scripts/iyt\"],\n distclass=BinaryDistribution,\n ext_modules=[], # !!! We override this inside build_ext above\n python_requires=\">=3.6\",\n )\n", "path": "setup.py"}]}
| 2,543 | 308 |
gh_patches_debug_16891
|
rasdani/github-patches
|
git_diff
|
medtagger__MedTagger-447
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bump Python to 3.7.0
## Expected Behavior
MedTagger should always use the latest version of Python technologies and follow the rabbit instead of leaving technical debt :)
## Actual Behavior
We've got Python 3.6.x right now.
## Additional comment
Remember about Makefiles, Dockerfiles, TravisCI and more(?).
**WATCH OUT!** It's relatively new. Some of our dependencies may not work properly! Find out if `numpy` and other libs support it!
**BLOCKED BY:**
- [SimpleITK](https://github.com/SimpleITK/SimpleITK/releases) - next release (>1.1.0) will be fine to use with Python3.7.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/scripts/import_data.py`
Content:
```
1 """Script that will fill MedTagger with data.
2
3 How to use it?
4 --------------
5 At first, download some scans from anywhere on the Internet. You can use example dataset from Data Science Bowl 2017:
6 https://www.kaggle.com/c/data-science-bowl-2017/data (look for 'sample_images.7z' file).
7
8 Then, place these data (unzipped) anywhere on your computer and run this script by:
9
10 (venv) $ python3.6 scripts/import_data.py --source=./dir_with_scans/
11
12 Please keep all scans with given structure:
13
14 |
15 `-- dir_with_scans
16 |-- 0a0c32c9e08cc2ea76a71649de56be6d
17 | |-- 0a67f9edb4915467ac16a565955898d3.dcm
18 | |-- 0eb4e3cae3de93e50431cf12bdc6c93d.dcm
19 | `-- ...
20 |-- 0a38e7597ca26f9374f8ea2770ba870d
21 | |-- 0bad9c3a3890617f78a905b78bc60f99.dcm
22 | |-- 1cffdd431884c2792ae0cbecec1c9e14.dcm
23 | `-- ...
24 `-- ...
25
26 """
27 import os
28 import argparse
29 import glob
30 import logging
31 import logging.config
32
33 from medtagger.repositories import scans as ScansRepository, datasets as DatasetsRepository
34 from medtagger.workers.storage import parse_dicom_and_update_slice
35
36
37 logging.config.fileConfig('logging.conf')
38 logger = logging.getLogger(__name__)
39
40 parser = argparse.ArgumentParser(description='Import data to the MedTagger.')
41 parser.add_argument('--source', type=str, required=True, help='Source directory')
42 parser.add_argument('--dataset', type=str, required=True, help='Dataset key for these scans')
43 args = parser.parse_args()
44
45
46 if __name__ == '__main__':
47 logger.info('Checking Dataset...')
48 dataset = DatasetsRepository.get_dataset_by_key(args.dataset)
49
50 source = args.source.rstrip('/')
51 for scan_directory in glob.iglob(source + '/*'):
52 if not os.path.isdir(scan_directory):
53 logger.warning('"%s" is not a directory. Skipping...', scan_directory)
54 continue
55
56 logger.info('Adding new Scan from "%s".', scan_directory)
57 slice_names = glob.glob(scan_directory + '/*.dcm')
58 number_of_slices = len(slice_names)
59 scan = ScansRepository.add_new_scan(dataset, number_of_slices, None)
60
61 for slice_name in slice_names:
62 logger.info('Adding new Slice to Scan "%s" based on "%s".', scan.id, slice_name)
63 with open(slice_name, 'rb') as slice_dicom_file:
64 _slice = scan.add_slice()
65 image = slice_dicom_file.read()
66 parse_dicom_and_update_slice.delay(_slice.id, image)
67
```
Path: `backend/scripts/convert_dicoms_to_png.py`
Content:
```
1 """Script that will convert multiple dicoms to PNG format.
2
3 How to use it?
4 --------------
5 At first, download some scans from anywhere on the Internet. You can use example dataset from Data Science Bowl 2017:
6 https://www.kaggle.com/c/data-science-bowl-2017/data (look for 'sample_images.7z' file).
7
8 Then, place these data (unzipped) anywhere on your computer and run this script by:
9
10 (venv) $ python3.6 scripts/dicoms_to_png.py --input=./dir_with_scans/ --output=./dir_with_scans/converted/
11
12 Name of the converted Dicom file is a position of the scan on the z axis.
13 """
14 import os
15 import argparse
16
17 import SimpleITK as sitk
18 from PIL import Image
19
20 from medtagger.conversion import convert_slice_to_normalized_8bit_array
21 from medtagger.definitions import DicomTag
22 from medtagger.dicoms import read_list
23
24
25 parser = argparse.ArgumentParser(description='Convert dicoms to png format.')
26 parser.add_argument('--input', type=str, required=True, help='Full path to directory where dicoms are located')
27 parser.add_argument('--output', type=str, required=True,
28 help='Full path to directory where converted dicoms would be located')
29
30 args = parser.parse_args()
31 dicoms_folder_path = args.input
32 converted_dicoms_folder_path = args.output
33
34 dicoms = [sitk.ReadImage(dicoms_folder_path + d) for d in os.listdir(dicoms_folder_path) if
35 os.path.isfile(dicoms_folder_path + d)]
36 min_position = abs(min(float((read_list(dicom, DicomTag.IMAGE_POSITION_PATIENT) or [])[2]) for dicom in dicoms))
37
38 if not os.path.exists(converted_dicoms_folder_path):
39 os.mkdir(converted_dicoms_folder_path)
40
41 for single_dicom in dicoms:
42 image_bytes = convert_slice_to_normalized_8bit_array(single_dicom)
43 slice_position = float((read_list(single_dicom, DicomTag.IMAGE_POSITION_PATIENT) or [])[2])
44 converted_dicom_name = '{0:.2f}'.format(slice_position + min_position)
45 Image.fromarray(image_bytes, 'L').save(converted_dicoms_folder_path + converted_dicom_name + '.png')
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/backend/scripts/convert_dicoms_to_png.py b/backend/scripts/convert_dicoms_to_png.py
--- a/backend/scripts/convert_dicoms_to_png.py
+++ b/backend/scripts/convert_dicoms_to_png.py
@@ -7,7 +7,7 @@
Then, place these data (unzipped) anywhere on your computer and run this script by:
- (venv) $ python3.6 scripts/dicoms_to_png.py --input=./dir_with_scans/ --output=./dir_with_scans/converted/
+ (venv) $ python3.7 scripts/dicoms_to_png.py --input=./dir_with_scans/ --output=./dir_with_scans/converted/
Name of the converted Dicom file is a position of the scan on the z axis.
"""
diff --git a/backend/scripts/import_data.py b/backend/scripts/import_data.py
--- a/backend/scripts/import_data.py
+++ b/backend/scripts/import_data.py
@@ -7,7 +7,7 @@
Then, place these data (unzipped) anywhere on your computer and run this script by:
- (venv) $ python3.6 scripts/import_data.py --source=./dir_with_scans/
+ (venv) $ python3.7 scripts/import_data.py --source=./dir_with_scans/
Please keep all scans with given structure:
|
{"golden_diff": "diff --git a/backend/scripts/convert_dicoms_to_png.py b/backend/scripts/convert_dicoms_to_png.py\n--- a/backend/scripts/convert_dicoms_to_png.py\n+++ b/backend/scripts/convert_dicoms_to_png.py\n@@ -7,7 +7,7 @@\n \n Then, place these data (unzipped) anywhere on your computer and run this script by:\n \n- (venv) $ python3.6 scripts/dicoms_to_png.py --input=./dir_with_scans/ --output=./dir_with_scans/converted/\n+ (venv) $ python3.7 scripts/dicoms_to_png.py --input=./dir_with_scans/ --output=./dir_with_scans/converted/\n \n Name of the converted Dicom file is a position of the scan on the z axis.\n \"\"\"\ndiff --git a/backend/scripts/import_data.py b/backend/scripts/import_data.py\n--- a/backend/scripts/import_data.py\n+++ b/backend/scripts/import_data.py\n@@ -7,7 +7,7 @@\n \n Then, place these data (unzipped) anywhere on your computer and run this script by:\n \n- (venv) $ python3.6 scripts/import_data.py --source=./dir_with_scans/\n+ (venv) $ python3.7 scripts/import_data.py --source=./dir_with_scans/\n \n Please keep all scans with given structure:\n", "issue": "Bump Python to 3.7.0\n## Expected Behavior\r\n\r\nMedTagger should always use the latest version of Python technologies and follow the rabbit instead of leaving technical dept :)\r\n\r\n## Actual Behavior\r\n\r\nWe've got Python 3.6.x right now.\r\n\r\n## Additional comment\r\n\r\nRemember about Makefiles, Dockerfiles, TravisCI and more(?).\r\n\r\n**WATCH OUT!** It's relatively new. Some of our dependencies may not work properly! Find out if `numpy` and other libs supports it!\r\n\r\n**BLOCKED BY:**\r\n- [SimpleITK](https://github.com/SimpleITK/SimpleITK/releases) - next release (>1.1.0) will be fine to use with Python3.7.\n", "before_files": [{"content": "\"\"\"Script that will fill MedTagger with data.\n\nHow to use it?\n--------------\nAt first, download some scans from anywhere on the Internet. You can use example dataset from Data Science Bowl 2017:\nhttps://www.kaggle.com/c/data-science-bowl-2017/data (look for 'sample_images.7z' file).\n\nThen, place these data (unzipped) anywhere on your computer and run this script by:\n\n (venv) $ python3.6 scripts/import_data.py --source=./dir_with_scans/\n\nPlease keep all scans with given structure:\n\n |\n `-- dir_with_scans\n |-- 0a0c32c9e08cc2ea76a71649de56be6d\n | |-- 0a67f9edb4915467ac16a565955898d3.dcm\n | |-- 0eb4e3cae3de93e50431cf12bdc6c93d.dcm\n | `-- ...\n |-- 0a38e7597ca26f9374f8ea2770ba870d\n | |-- 0bad9c3a3890617f78a905b78bc60f99.dcm\n | |-- 1cffdd431884c2792ae0cbecec1c9e14.dcm\n | `-- ...\n `-- ...\n\n\"\"\"\nimport os\nimport argparse\nimport glob\nimport logging\nimport logging.config\n\nfrom medtagger.repositories import scans as ScansRepository, datasets as DatasetsRepository\nfrom medtagger.workers.storage import parse_dicom_and_update_slice\n\n\nlogging.config.fileConfig('logging.conf')\nlogger = logging.getLogger(__name__)\n\nparser = argparse.ArgumentParser(description='Import data to the MedTagger.')\nparser.add_argument('--source', type=str, required=True, help='Source directory')\nparser.add_argument('--dataset', type=str, required=True, help='Dataset key for these scans')\nargs = parser.parse_args()\n\n\nif __name__ == '__main__':\n logger.info('Checking Dataset...')\n dataset = DatasetsRepository.get_dataset_by_key(args.dataset)\n\n source = args.source.rstrip('/')\n for scan_directory in glob.iglob(source + '/*'):\n if not os.path.isdir(scan_directory):\n logger.warning('\"%s\" is not a directory. 
Skipping...', scan_directory)\n continue\n\n logger.info('Adding new Scan from \"%s\".', scan_directory)\n slice_names = glob.glob(scan_directory + '/*.dcm')\n number_of_slices = len(slice_names)\n scan = ScansRepository.add_new_scan(dataset, number_of_slices, None)\n\n for slice_name in slice_names:\n logger.info('Adding new Slice to Scan \"%s\" based on \"%s\".', scan.id, slice_name)\n with open(slice_name, 'rb') as slice_dicom_file:\n _slice = scan.add_slice()\n image = slice_dicom_file.read()\n parse_dicom_and_update_slice.delay(_slice.id, image)\n", "path": "backend/scripts/import_data.py"}, {"content": "\"\"\"Script that will convert multiple dicoms to PNG format.\n\nHow to use it?\n--------------\nAt first, download some scans from anywhere on the Internet. You can use example dataset from Data Science Bowl 2017:\nhttps://www.kaggle.com/c/data-science-bowl-2017/data (look for 'sample_images.7z' file).\n\nThen, place these data (unzipped) anywhere on your computer and run this script by:\n\n (venv) $ python3.6 scripts/dicoms_to_png.py --input=./dir_with_scans/ --output=./dir_with_scans/converted/\n\nName of the converted Dicom file is a position of the scan on the z axis.\n\"\"\"\nimport os\nimport argparse\n\nimport SimpleITK as sitk\nfrom PIL import Image\n\nfrom medtagger.conversion import convert_slice_to_normalized_8bit_array\nfrom medtagger.definitions import DicomTag\nfrom medtagger.dicoms import read_list\n\n\nparser = argparse.ArgumentParser(description='Convert dicoms to png format.')\nparser.add_argument('--input', type=str, required=True, help='Full path to directory where dicoms are located')\nparser.add_argument('--output', type=str, required=True,\n help='Full path to directory where converted dicoms would be located')\n\nargs = parser.parse_args()\ndicoms_folder_path = args.input\nconverted_dicoms_folder_path = args.output\n\ndicoms = [sitk.ReadImage(dicoms_folder_path + d) for d in os.listdir(dicoms_folder_path) if\n os.path.isfile(dicoms_folder_path + d)]\nmin_position = abs(min(float((read_list(dicom, DicomTag.IMAGE_POSITION_PATIENT) or [])[2]) for dicom in dicoms))\n\nif not os.path.exists(converted_dicoms_folder_path):\n os.mkdir(converted_dicoms_folder_path)\n\nfor single_dicom in dicoms:\n image_bytes = convert_slice_to_normalized_8bit_array(single_dicom)\n slice_position = float((read_list(single_dicom, DicomTag.IMAGE_POSITION_PATIENT) or [])[2])\n converted_dicom_name = '{0:.2f}'.format(slice_position + min_position)\n Image.fromarray(image_bytes, 'L').save(converted_dicoms_folder_path + converted_dicom_name + '.png')\n", "path": "backend/scripts/convert_dicoms_to_png.py"}], "after_files": [{"content": "\"\"\"Script that will fill MedTagger with data.\n\nHow to use it?\n--------------\nAt first, download some scans from anywhere on the Internet. 
You can use example dataset from Data Science Bowl 2017:\nhttps://www.kaggle.com/c/data-science-bowl-2017/data (look for 'sample_images.7z' file).\n\nThen, place these data (unzipped) anywhere on your computer and run this script by:\n\n (venv) $ python3.7 scripts/import_data.py --source=./dir_with_scans/\n\nPlease keep all scans with given structure:\n\n |\n `-- dir_with_scans\n |-- 0a0c32c9e08cc2ea76a71649de56be6d\n | |-- 0a67f9edb4915467ac16a565955898d3.dcm\n | |-- 0eb4e3cae3de93e50431cf12bdc6c93d.dcm\n | `-- ...\n |-- 0a38e7597ca26f9374f8ea2770ba870d\n | |-- 0bad9c3a3890617f78a905b78bc60f99.dcm\n | |-- 1cffdd431884c2792ae0cbecec1c9e14.dcm\n | `-- ...\n `-- ...\n\n\"\"\"\nimport os\nimport argparse\nimport glob\nimport logging\nimport logging.config\n\nfrom medtagger.repositories import scans as ScansRepository, datasets as DatasetsRepository\nfrom medtagger.workers.storage import parse_dicom_and_update_slice\n\n\nlogging.config.fileConfig('logging.conf')\nlogger = logging.getLogger(__name__)\n\nparser = argparse.ArgumentParser(description='Import data to the MedTagger.')\nparser.add_argument('--source', type=str, required=True, help='Source directory')\nparser.add_argument('--dataset', type=str, required=True, help='Dataset key for these scans')\nargs = parser.parse_args()\n\n\nif __name__ == '__main__':\n logger.info('Checking Dataset...')\n dataset = DatasetsRepository.get_dataset_by_key(args.dataset)\n\n source = args.source.rstrip('/')\n for scan_directory in glob.iglob(source + '/*'):\n if not os.path.isdir(scan_directory):\n logger.warning('\"%s\" is not a directory. Skipping...', scan_directory)\n continue\n\n logger.info('Adding new Scan from \"%s\".', scan_directory)\n slice_names = glob.glob(scan_directory + '/*.dcm')\n number_of_slices = len(slice_names)\n scan = ScansRepository.add_new_scan(dataset, number_of_slices, None)\n\n for slice_name in slice_names:\n logger.info('Adding new Slice to Scan \"%s\" based on \"%s\".', scan.id, slice_name)\n with open(slice_name, 'rb') as slice_dicom_file:\n _slice = scan.add_slice()\n image = slice_dicom_file.read()\n parse_dicom_and_update_slice.delay(_slice.id, image)\n", "path": "backend/scripts/import_data.py"}, {"content": "\"\"\"Script that will convert multiple dicoms to PNG format.\n\nHow to use it?\n--------------\nAt first, download some scans from anywhere on the Internet. 
You can use example dataset from Data Science Bowl 2017:\nhttps://www.kaggle.com/c/data-science-bowl-2017/data (look for 'sample_images.7z' file).\n\nThen, place these data (unzipped) anywhere on your computer and run this script by:\n\n (venv) $ python3.7 scripts/dicoms_to_png.py --input=./dir_with_scans/ --output=./dir_with_scans/converted/\n\nName of the converted Dicom file is a position of the scan on the z axis.\n\"\"\"\nimport os\nimport argparse\n\nimport SimpleITK as sitk\nfrom PIL import Image\n\nfrom medtagger.conversion import convert_slice_to_normalized_8bit_array\nfrom medtagger.definitions import DicomTag\nfrom medtagger.dicoms import read_list\n\n\nparser = argparse.ArgumentParser(description='Convert dicoms to png format.')\nparser.add_argument('--input', type=str, required=True, help='Full path to directory where dicoms are located')\nparser.add_argument('--output', type=str, required=True,\n help='Full path to directory where converted dicoms would be located')\n\nargs = parser.parse_args()\ndicoms_folder_path = args.input\nconverted_dicoms_folder_path = args.output\n\ndicoms = [sitk.ReadImage(dicoms_folder_path + d) for d in os.listdir(dicoms_folder_path) if\n os.path.isfile(dicoms_folder_path + d)]\nmin_position = abs(min(float((read_list(dicom, DicomTag.IMAGE_POSITION_PATIENT) or [])[2]) for dicom in dicoms))\n\nif not os.path.exists(converted_dicoms_folder_path):\n os.mkdir(converted_dicoms_folder_path)\n\nfor single_dicom in dicoms:\n image_bytes = convert_slice_to_normalized_8bit_array(single_dicom)\n slice_position = float((read_list(single_dicom, DicomTag.IMAGE_POSITION_PATIENT) or [])[2])\n converted_dicom_name = '{0:.2f}'.format(slice_position + min_position)\n Image.fromarray(image_bytes, 'L').save(converted_dicoms_folder_path + converted_dicom_name + '.png')\n", "path": "backend/scripts/convert_dicoms_to_png.py"}]}
| 1,840 | 291 |
gh_patches_debug_41290
|
rasdani/github-patches
|
git_diff
|
cornellius-gp__gpytorch-1904
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] Gradient computation with KeOps kernel only works with low number of training data, fails otherwise
# 🐛 Bug
Hi,
First of all, I would like to extend my thanks to all the developers for all the efforts you have put into both the research and this great package.
Consider a GP with a KeOps kernel (e.g. gpytorch.kernels.keops.RBFKernel). If I train it with N=100 points, then the gradient of the predictive mean can be obtained by torch.autograd.grad or .backward(), but set N=500 and an error will be thrown that the input tensor was not used in the graph. I have tested the script on two separate machines and a Colab instance. Using the standard GPyTorch kernels does not run into this issue. I spent a good deal of time pinpointing what was wrong from bigger chunks of code, and this seemed to be the issue. The gradient link seems to cut off between the covariance output of the KeOps kernel and the input (covar.x1).
I have provided a minimal code example right below that should quickly give you an idea of this somewhat strange behavior. It contains two test cases, with N=100 (which passes) and N=500 (which fails). The code is from the GPyTorch regression examples; I have only added the GP kernel and a few lines for calculating gradients at the end.
## To reproduce
```python
import math
import torch
import gpytorch
import time
# We will use the simplest form of GP model, exact inference with gpytorch.kernels.keops.RBFKernel
class ExactGPModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super(ExactGPModel, self).__init__(train_x, train_y, likelihood)
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.keops.RBFKernel())
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
def train_and_eval_GP(N = 100):
"""
inputs:
N (int): Number of training points
"""
# make train/val/test
# Training data is 100 points in [0,1] inclusive regularly spaced
train_x = torch.linspace(0, 1, N)
# True function is sin(2*pi*x) with Gaussian noise
train_y = torch.sin(train_x * (2 * math.pi)) + torch.randn(train_x.size()) * math.sqrt(0.04)
# normalize features
mean = train_x.mean()
std = train_x.std() + 1e-6 # prevent dividing by 0
train_x = (train_x - mean) / std
# normalize labels
mean, std = train_y.mean(),train_y.std()
train_y = (train_y - mean) / std
# make continguous
train_x, train_y = train_x.contiguous(), train_y.contiguous()
output_device = torch.device('cuda:0')
train_x, train_y = train_x.to(output_device), train_y.to(output_device)
# initialize likelihood and model
likelihood = gpytorch.likelihoods.GaussianLikelihood().to(output_device)
model = ExactGPModel(train_x, train_y, likelihood).to(output_device)
# Find optimal model hyperparameters
model.train()
likelihood.train()
# Use the adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.1) # Includes GaussianLikelihood parameters
# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
training_iter = 20
for i in range(training_iter):
# Zero gradients from previous iteration
optimizer.zero_grad()
# Output from model
output = model(train_x)
# Calc loss and backprop gradients
loss = -mll(output, train_y)
loss.backward()
optimizer.step()
print(f'GP model trained.')
# Get into evaluation (predictive posterior) mode
model.eval()
likelihood.eval()
# Test points are regularly spaced along [0,1]
test_x = torch.linspace(0, 1, 51, requires_grad=True).to(output_device).contiguous()
# Make predictions by feeding model through likelihood
with gpytorch.settings.fast_pred_var():
observed_pred = likelihood(model(test_x))
assert torch.autograd.grad(observed_pred.mean.sum(), test_x, retain_graph=True) is not None
print('gradient exists:')
print(torch.autograd.grad(observed_pred.mean.sum(), test_x, retain_graph=True))
if __name__ == "__main__":
Ns = [100, 500] #test cases
for n in Ns:
try:
print(f'testing with {n} points...')
train_and_eval_GP(N = n)
print('success!')
except Exception as e:
print('failed.')
print(e)
```
** Stack trace/error message **
```
testing with 100 points...
GP model trained.
gradient exists:
(tensor([-2.6629e+00, -2.6507e+00, -2.6344e+00, -2.6138e+00, -2.5891e+00,
...,
9.1844e-01], device='cuda:0'),)
success!
testing with 500 points...
GP model trained.
failed.
One of the differentiated Tensors appears to not have been used in the graph. Set allow_unused=True if this is the desired behavior.
```
## Expected Behavior
Setting the kernel to the standard, non-KeOps kernel (gpytorch.kernels.RBFKernel), we get the gradients for the second case, shown below. However, I can't simply use it, since I'm working on a larger dataset that will run out of memory if I do so.
```
testing with 100 points...
GP model trained.
gradient exists:
(tensor([-2.5885, -2.5870, -2.5819, -2.5732, -2.5609, -2.5449, -2.5254, -2.5022,
...,
0.6977, 0.7880, 0.8762], device='cuda:0'),)
success!
testing with 500 points...
GP model trained.
gradient exists:
(tensor([-2.4751, -2.4741, -2.4698, -2.4622, -2.4514, -2.4373, -2.4199, -2.3993,
...,
0.4727, 0.5600, 0.6462], device='cuda:0'),)
success!
```
## System information
**Please complete the following information:**
- <!-- GPyTorch Version (run `print(gpytorch.__version__)` --> GPyTorch 1.6.0
- <!-- PyTorch Version (run `print(torch.__version__)` --> PyTorch 1.10.1
- <!-- KeOps Version --> KeOps 1.5
- <!-- Computer OS --> Ubuntu 20.04.3 LTS
## Additional context
I know that the issue might be unrelated to GPyTorch, as this clearly stems from the KeOps kernel. It is, however, difficult to track, so I thought I'd report it here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gpytorch/kernels/keops/rbf_kernel.py`
Content:
```
1 #!/usr/bin/env python3
2 import torch
3
4 from ... import settings
5 from ...lazy import KeOpsLazyTensor
6 from ..rbf_kernel import postprocess_rbf
7 from .keops_kernel import KeOpsKernel
8
9 try:
10 from pykeops.torch import LazyTensor as KEOLazyTensor
11
12 class RBFKernel(KeOpsKernel):
13 """
14 Implements the RBF kernel using KeOps as a driver for kernel matrix multiplies.
15
16 This class can be used as a drop in replacement for gpytorch.kernels.RBFKernel in most cases, and supports
17 the same arguments. There are currently a few limitations, for example a lack of batch mode support. However,
18 most other features like ARD will work.
19 """
20
21 has_lengthscale = True
22
23 def _nonkeops_covar_func(self, x1, x2, diag=False):
24 return self.covar_dist(
25 x1, x2, square_dist=True, diag=diag, dist_postprocess_func=postprocess_rbf, postprocess=True
26 )
27
28 def covar_func(self, x1, x2, diag=False):
29 # We only should use KeOps on big kernel matrices
30 # If we would otherwise be performing Cholesky inference, (or when just computing a kernel matrix diag)
31 # then don't apply KeOps
32 if (
33 diag
34 or x1.size(-2) < settings.max_cholesky_size.value()
35 or x2.size(-2) < settings.max_cholesky_size.value()
36 ):
37 return self._nonkeops_covar_func(x1, x2, diag=diag)
38
39 with torch.autograd.enable_grad():
40 x1_ = KEOLazyTensor(x1[..., :, None, :])
41 x2_ = KEOLazyTensor(x2[..., None, :, :])
42
43 K = (-((x1_ - x2_) ** 2).sum(-1) / 2).exp()
44
45 return K
46
47 def forward(self, x1, x2, diag=False, **params):
48 x1_ = x1.div(self.lengthscale)
49 x2_ = x2.div(self.lengthscale)
50
51 covar_func = lambda x1, x2, diag=diag: self.covar_func(x1, x2, diag)
52
53 if diag:
54 return covar_func(x1_, x2_, diag=True)
55
56 return KeOpsLazyTensor(x1_, x2_, covar_func)
57
58
59 except ImportError:
60
61 class RBFKernel(KeOpsKernel):
62 def __init__(self, *args, **kwargs):
63 super().__init__()
64
```
Path: `gpytorch/kernels/keops/matern_kernel.py`
Content:
```
1 #!/usr/bin/env python3
2 import math
3
4 import torch
5
6 from ... import settings
7 from ...lazy import KeOpsLazyTensor
8 from .keops_kernel import KeOpsKernel
9
10 try:
11 from pykeops.torch import LazyTensor as KEOLazyTensor
12
13 class MaternKernel(KeOpsKernel):
14 """
15 Implements the Matern kernel using KeOps as a driver for kernel matrix multiplies.
16
17 This class can be used as a drop in replacement for gpytorch.kernels.MaternKernel in most cases, and supports
18 the same arguments. There are currently a few limitations, for example a lack of batch mode support. However,
19 most other features like ARD will work.
20 """
21
22 has_lengthscale = True
23
24 def __init__(self, nu=2.5, **kwargs):
25 if nu not in {0.5, 1.5, 2.5}:
26 raise RuntimeError("nu expected to be 0.5, 1.5, or 2.5")
27 super(MaternKernel, self).__init__(**kwargs)
28 self.nu = nu
29
30 def _nonkeops_covar_func(self, x1, x2, diag=False):
31 distance = self.covar_dist(x1, x2, diag=diag)
32 exp_component = torch.exp(-math.sqrt(self.nu * 2) * distance)
33
34 if self.nu == 0.5:
35 constant_component = 1
36 elif self.nu == 1.5:
37 constant_component = (math.sqrt(3) * distance).add(1)
38 elif self.nu == 2.5:
39 constant_component = (math.sqrt(5) * distance).add(1).add(5.0 / 3.0 * distance ** 2)
40 return constant_component * exp_component
41
42 def covar_func(self, x1, x2, diag=False):
43 # We only should use KeOps on big kernel matrices
44 # If we would otherwise be performing Cholesky inference, (or when just computing a kernel matrix diag)
45 # then don't apply KeOps
46 if (
47 diag
48 or x1.size(-2) < settings.max_cholesky_size.value()
49 or x2.size(-2) < settings.max_cholesky_size.value()
50 ):
51 return self._nonkeops_covar_func(x1, x2, diag=diag)
52 # TODO: x1 / x2 size checks are a work around for a very minor bug in KeOps.
53 # This bug is fixed on KeOps master, and we'll remove that part of the check
54 # when they cut a new release.
55 elif x1.size(-2) == 1 or x2.size(-2) == 1:
56 return self._nonkeops_covar_func(x1, x2, diag=diag)
57 else:
58 with torch.autograd.enable_grad():
59 # We only should use KeOps on big kernel matrices
60 # If we would otherwise be performing Cholesky inference, then don't apply KeOps
61 if (
62 x1.size(-2) < settings.max_cholesky_size.value()
63 or x2.size(-2) < settings.max_cholesky_size.value()
64 ):
65 x1_ = x1[..., :, None, :]
66 x2_ = x2[..., None, :, :]
67 else:
68 x1_ = KEOLazyTensor(x1[..., :, None, :])
69 x2_ = KEOLazyTensor(x2[..., None, :, :])
70
71 distance = ((x1_ - x2_) ** 2).sum(-1).sqrt()
72 exp_component = (-math.sqrt(self.nu * 2) * distance).exp()
73
74 if self.nu == 0.5:
75 constant_component = 1
76 elif self.nu == 1.5:
77 constant_component = (math.sqrt(3) * distance) + 1
78 elif self.nu == 2.5:
79 constant_component = (math.sqrt(5) * distance) + (1 + 5.0 / 3.0 * distance ** 2)
80
81 return constant_component * exp_component
82
83 def forward(self, x1, x2, diag=False, **params):
84 mean = x1.reshape(-1, x1.size(-1)).mean(0)[(None,) * (x1.dim() - 1)]
85
86 x1_ = (x1 - mean).div(self.lengthscale)
87 x2_ = (x2 - mean).div(self.lengthscale)
88
89 if diag:
90 return self.covar_func(x1_, x2_, diag=True)
91
92 covar_func = lambda x1, x2, diag=False: self.covar_func(x1, x2, diag)
93 return KeOpsLazyTensor(x1_, x2_, covar_func)
94
95
96 except ImportError:
97
98 class MaternKernel(KeOpsKernel):
99 def __init__(self, *args, **kwargs):
100 super().__init__()
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gpytorch/kernels/keops/matern_kernel.py b/gpytorch/kernels/keops/matern_kernel.py
--- a/gpytorch/kernels/keops/matern_kernel.py
+++ b/gpytorch/kernels/keops/matern_kernel.py
@@ -43,19 +43,21 @@
# We only should use KeOps on big kernel matrices
# If we would otherwise be performing Cholesky inference, (or when just computing a kernel matrix diag)
# then don't apply KeOps
- if (
- diag
- or x1.size(-2) < settings.max_cholesky_size.value()
- or x2.size(-2) < settings.max_cholesky_size.value()
- ):
- return self._nonkeops_covar_func(x1, x2, diag=diag)
- # TODO: x1 / x2 size checks are a work around for a very minor bug in KeOps.
- # This bug is fixed on KeOps master, and we'll remove that part of the check
- # when they cut a new release.
- elif x1.size(-2) == 1 or x2.size(-2) == 1:
- return self._nonkeops_covar_func(x1, x2, diag=diag)
- else:
- with torch.autograd.enable_grad():
+ # enable gradients to ensure that test time caches on small predictions are still
+ # backprop-able
+ with torch.autograd.enable_grad():
+ if (
+ diag
+ or x1.size(-2) < settings.max_cholesky_size.value()
+ or x2.size(-2) < settings.max_cholesky_size.value()
+ ):
+ return self._nonkeops_covar_func(x1, x2, diag=diag)
+ # TODO: x1 / x2 size checks are a work around for a very minor bug in KeOps.
+ # This bug is fixed on KeOps master, and we'll remove that part of the check
+ # when they cut a new release.
+ elif x1.size(-2) == 1 or x2.size(-2) == 1:
+ return self._nonkeops_covar_func(x1, x2, diag=diag)
+ else:
# We only should use KeOps on big kernel matrices
# If we would otherwise be performing Cholesky inference, then don't apply KeOps
if (
diff --git a/gpytorch/kernels/keops/rbf_kernel.py b/gpytorch/kernels/keops/rbf_kernel.py
--- a/gpytorch/kernels/keops/rbf_kernel.py
+++ b/gpytorch/kernels/keops/rbf_kernel.py
@@ -22,21 +22,28 @@
def _nonkeops_covar_func(self, x1, x2, diag=False):
return self.covar_dist(
- x1, x2, square_dist=True, diag=diag, dist_postprocess_func=postprocess_rbf, postprocess=True
+ x1,
+ x2,
+ square_dist=True,
+ diag=diag,
+ dist_postprocess_func=postprocess_rbf,
+ postprocess=True,
)
def covar_func(self, x1, x2, diag=False):
# We only should use KeOps on big kernel matrices
# If we would otherwise be performing Cholesky inference, (or when just computing a kernel matrix diag)
# then don't apply KeOps
- if (
- diag
- or x1.size(-2) < settings.max_cholesky_size.value()
- or x2.size(-2) < settings.max_cholesky_size.value()
- ):
- return self._nonkeops_covar_func(x1, x2, diag=diag)
-
+ # enable gradients to ensure that test time caches on small predictions are still
+ # backprop-able
with torch.autograd.enable_grad():
+ if (
+ diag
+ or x1.size(-2) < settings.max_cholesky_size.value()
+ or x2.size(-2) < settings.max_cholesky_size.value()
+ ):
+ return self._nonkeops_covar_func(x1, x2, diag=diag)
+
x1_ = KEOLazyTensor(x1[..., :, None, :])
x2_ = KEOLazyTensor(x2[..., None, :, :])
|
{"golden_diff": "diff --git a/gpytorch/kernels/keops/matern_kernel.py b/gpytorch/kernels/keops/matern_kernel.py\n--- a/gpytorch/kernels/keops/matern_kernel.py\n+++ b/gpytorch/kernels/keops/matern_kernel.py\n@@ -43,19 +43,21 @@\n # We only should use KeOps on big kernel matrices\n # If we would otherwise be performing Cholesky inference, (or when just computing a kernel matrix diag)\n # then don't apply KeOps\n- if (\n- diag\n- or x1.size(-2) < settings.max_cholesky_size.value()\n- or x2.size(-2) < settings.max_cholesky_size.value()\n- ):\n- return self._nonkeops_covar_func(x1, x2, diag=diag)\n- # TODO: x1 / x2 size checks are a work around for a very minor bug in KeOps.\n- # This bug is fixed on KeOps master, and we'll remove that part of the check\n- # when they cut a new release.\n- elif x1.size(-2) == 1 or x2.size(-2) == 1:\n- return self._nonkeops_covar_func(x1, x2, diag=diag)\n- else:\n- with torch.autograd.enable_grad():\n+ # enable gradients to ensure that test time caches on small predictions are still\n+ # backprop-able\n+ with torch.autograd.enable_grad():\n+ if (\n+ diag\n+ or x1.size(-2) < settings.max_cholesky_size.value()\n+ or x2.size(-2) < settings.max_cholesky_size.value()\n+ ):\n+ return self._nonkeops_covar_func(x1, x2, diag=diag)\n+ # TODO: x1 / x2 size checks are a work around for a very minor bug in KeOps.\n+ # This bug is fixed on KeOps master, and we'll remove that part of the check\n+ # when they cut a new release.\n+ elif x1.size(-2) == 1 or x2.size(-2) == 1:\n+ return self._nonkeops_covar_func(x1, x2, diag=diag)\n+ else:\n # We only should use KeOps on big kernel matrices\n # If we would otherwise be performing Cholesky inference, then don't apply KeOps\n if (\ndiff --git a/gpytorch/kernels/keops/rbf_kernel.py b/gpytorch/kernels/keops/rbf_kernel.py\n--- a/gpytorch/kernels/keops/rbf_kernel.py\n+++ b/gpytorch/kernels/keops/rbf_kernel.py\n@@ -22,21 +22,28 @@\n \n def _nonkeops_covar_func(self, x1, x2, diag=False):\n return self.covar_dist(\n- x1, x2, square_dist=True, diag=diag, dist_postprocess_func=postprocess_rbf, postprocess=True\n+ x1,\n+ x2,\n+ square_dist=True,\n+ diag=diag,\n+ dist_postprocess_func=postprocess_rbf,\n+ postprocess=True,\n )\n \n def covar_func(self, x1, x2, diag=False):\n # We only should use KeOps on big kernel matrices\n # If we would otherwise be performing Cholesky inference, (or when just computing a kernel matrix diag)\n # then don't apply KeOps\n- if (\n- diag\n- or x1.size(-2) < settings.max_cholesky_size.value()\n- or x2.size(-2) < settings.max_cholesky_size.value()\n- ):\n- return self._nonkeops_covar_func(x1, x2, diag=diag)\n-\n+ # enable gradients to ensure that test time caches on small predictions are still\n+ # backprop-able\n with torch.autograd.enable_grad():\n+ if (\n+ diag\n+ or x1.size(-2) < settings.max_cholesky_size.value()\n+ or x2.size(-2) < settings.max_cholesky_size.value()\n+ ):\n+ return self._nonkeops_covar_func(x1, x2, diag=diag)\n+\n x1_ = KEOLazyTensor(x1[..., :, None, :])\n x2_ = KEOLazyTensor(x2[..., None, :, :])\n", "issue": "[Bug] Gradient computation with KeOps kernel only works with low number of training data, fails otherwise\n# \ud83d\udc1b Bug\r\n\r\nHi,\r\n\r\nFirst of all, I would like to extend my thanks to all the developers for all the efforts you have put into both the research and this great package. \r\n\r\nConsider a GP with a KeOps kernel (e.g. gpytorch.kernels.keops.RBFKernel). 
If I train it with N=100 number of points, then the gradient of predictive mean can be obtained by torch.autograd.grad or .backward(), but set N=500 and an error will be thrown that the input tensor was not used in the graph. I have tested the script on two separate machines and a colab instance. Using the GPyTorch standard kernels will not run into this issue. I spent a good deal of time pinpointing what was wrong from bigger chunks of code and this seemed to be the issue. The gradient link seems to cut off between the covariance output of the KeOps kernel and the input (covar.x1). \r\n\r\nI have provided a minimal code right below that should quickly give you an idea of this somewhat strange behavior. It contains two test cases with N=100 (which passes) and 500 (fails). The code is from GPyTorch regression examples, I have only added the GP kernel and a few lines for calculating gradients at the end. \r\n\r\n## To reproduce\r\n\r\n```python\r\nimport math\r\nimport torch\r\nimport gpytorch\r\nimport time\r\n\r\n# We will use the simplest form of GP model, exact inference with gpytorch.kernels.keops.RBFKernel\r\nclass ExactGPModel(gpytorch.models.ExactGP):\r\n def __init__(self, train_x, train_y, likelihood):\r\n super(ExactGPModel, self).__init__(train_x, train_y, likelihood)\r\n self.mean_module = gpytorch.means.ConstantMean()\r\n self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.keops.RBFKernel())\r\n\r\n def forward(self, x):\r\n mean_x = self.mean_module(x)\r\n covar_x = self.covar_module(x)\r\n return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)\r\n\r\ndef train_and_eval_GP(N = 100):\r\n \"\"\"\r\n inputs:\r\n N (int): Number of training points\r\n \"\"\"\r\n # make train/val/test\r\n # Training data is 100 points in [0,1] inclusive regularly spaced\r\n train_x = torch.linspace(0, 1, N)\r\n # True function is sin(2*pi*x) with Gaussian noise\r\n train_y = torch.sin(train_x * (2 * math.pi)) + torch.randn(train_x.size()) * math.sqrt(0.04)\r\n # normalize features\r\n mean = train_x.mean()\r\n std = train_x.std() + 1e-6 # prevent dividing by 0\r\n train_x = (train_x - mean) / std\r\n\r\n # normalize labels\r\n mean, std = train_y.mean(),train_y.std()\r\n train_y = (train_y - mean) / std\r\n\r\n # make continguous\r\n train_x, train_y = train_x.contiguous(), train_y.contiguous()\r\n\r\n output_device = torch.device('cuda:0')\r\n\r\n train_x, train_y = train_x.to(output_device), train_y.to(output_device)\r\n\r\n # initialize likelihood and model\r\n likelihood = gpytorch.likelihoods.GaussianLikelihood().to(output_device)\r\n model = ExactGPModel(train_x, train_y, likelihood).to(output_device)\r\n\r\n # Find optimal model hyperparameters\r\n model.train()\r\n likelihood.train()\r\n\r\n # Use the adam optimizer\r\n optimizer = torch.optim.Adam(model.parameters(), lr=0.1) # Includes GaussianLikelihood parameters\r\n\r\n # \"Loss\" for GPs - the marginal log likelihood\r\n mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)\r\n\r\n training_iter = 20\r\n for i in range(training_iter):\r\n # Zero gradients from previous iteration\r\n optimizer.zero_grad()\r\n # Output from model\r\n output = model(train_x)\r\n # Calc loss and backprop gradients\r\n loss = -mll(output, train_y)\r\n loss.backward()\r\n optimizer.step()\r\n print(f'GP model trained.')\r\n\r\n # Get into evaluation (predictive posterior) mode\r\n model.eval()\r\n likelihood.eval()\r\n\r\n # Test points are regularly spaced along [0,1]\r\n test_x = torch.linspace(0, 1, 51, 
requires_grad=True).to(output_device).contiguous()\r\n \r\n # Make predictions by feeding model through likelihood\r\n with gpytorch.settings.fast_pred_var():\r\n observed_pred = likelihood(model(test_x))\r\n assert torch.autograd.grad(observed_pred.mean.sum(), test_x, retain_graph=True) is not None\r\n print('gradient exists:')\r\n print(torch.autograd.grad(observed_pred.mean.sum(), test_x, retain_graph=True))\r\n\r\nif __name__ == \"__main__\":\r\n Ns = [100, 500] #test cases\r\n for n in Ns:\r\n try:\r\n print(f'testing with {n} points...')\r\n train_and_eval_GP(N = n) \r\n print('success!')\r\n except Exception as e:\r\n print('failed.')\r\n print(e)\r\n```\r\n\r\n** Stack trace/error message **\r\n```\r\ntesting with 100 points...\r\nGP model trained.\r\ngradient exists:\r\n(tensor([-2.6629e+00, -2.6507e+00, -2.6344e+00, -2.6138e+00, -2.5891e+00,\r\n ...,\r\n 9.1844e-01], device='cuda:0'),)\r\nsuccess!\r\ntesting with 500 points...\r\nGP model trained.\r\nfailed.\r\nOne of the differentiated Tensors appears to not have been used in the graph. Set allow_unused=True if this is the desired behavior.\r\n```\r\n\r\n## Expected Behavior\r\n\r\nSetting the kernel to the standard, non-KeOps kernel (gpytorch.kernels.RBFKernel) we get the gradients for the second case, shown below. However, I can't simply use it since I'm working on a larger dataset that will run out of memory if I do so.\r\n\r\n```\r\ntesting with 100 points...\r\nGP model trained.\r\ngradient exists:\r\n(tensor([-2.5885, -2.5870, -2.5819, -2.5732, -2.5609, -2.5449, -2.5254, -2.5022,\r\n ...,\r\n 0.6977, 0.7880, 0.8762], device='cuda:0'),)\r\nsuccess!\r\ntesting with 500 points...\r\nGP model trained.\r\ngradient exists:\r\n(tensor([-2.4751, -2.4741, -2.4698, -2.4622, -2.4514, -2.4373, -2.4199, -2.3993,\r\n ...,\r\n 0.4727, 0.5600, 0.6462], device='cuda:0'),)\r\nsuccess!\r\n```\r\n## System information\r\n\r\n**Please complete the following information:**\r\n- <!-- GPyTorch Version (run `print(gpytorch.__version__)` --> GPyTorch 1.6.0\r\n- <!-- PyTorch Version (run `print(torch.__version__)` --> PyTorch 1.10.1\r\n- <!-- KeOps Version --> KeOps 1.5\r\n- <!-- Computer OS --> Ubuntu 20.04.3 LTS\r\n\r\n## Additional context\r\nI know that the issue might be unrelated to GPyTorch as this clearly stems from the keops kernel. It is however difficult to track, so I thought I'd report it here. \r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport torch\n\nfrom ... import settings\nfrom ...lazy import KeOpsLazyTensor\nfrom ..rbf_kernel import postprocess_rbf\nfrom .keops_kernel import KeOpsKernel\n\ntry:\n from pykeops.torch import LazyTensor as KEOLazyTensor\n\n class RBFKernel(KeOpsKernel):\n \"\"\"\n Implements the RBF kernel using KeOps as a driver for kernel matrix multiplies.\n\n This class can be used as a drop in replacement for gpytorch.kernels.RBFKernel in most cases, and supports\n the same arguments. There are currently a few limitations, for example a lack of batch mode support. 
However,\n most other features like ARD will work.\n \"\"\"\n\n has_lengthscale = True\n\n def _nonkeops_covar_func(self, x1, x2, diag=False):\n return self.covar_dist(\n x1, x2, square_dist=True, diag=diag, dist_postprocess_func=postprocess_rbf, postprocess=True\n )\n\n def covar_func(self, x1, x2, diag=False):\n # We only should use KeOps on big kernel matrices\n # If we would otherwise be performing Cholesky inference, (or when just computing a kernel matrix diag)\n # then don't apply KeOps\n if (\n diag\n or x1.size(-2) < settings.max_cholesky_size.value()\n or x2.size(-2) < settings.max_cholesky_size.value()\n ):\n return self._nonkeops_covar_func(x1, x2, diag=diag)\n\n with torch.autograd.enable_grad():\n x1_ = KEOLazyTensor(x1[..., :, None, :])\n x2_ = KEOLazyTensor(x2[..., None, :, :])\n\n K = (-((x1_ - x2_) ** 2).sum(-1) / 2).exp()\n\n return K\n\n def forward(self, x1, x2, diag=False, **params):\n x1_ = x1.div(self.lengthscale)\n x2_ = x2.div(self.lengthscale)\n\n covar_func = lambda x1, x2, diag=diag: self.covar_func(x1, x2, diag)\n\n if diag:\n return covar_func(x1_, x2_, diag=True)\n\n return KeOpsLazyTensor(x1_, x2_, covar_func)\n\n\nexcept ImportError:\n\n class RBFKernel(KeOpsKernel):\n def __init__(self, *args, **kwargs):\n super().__init__()\n", "path": "gpytorch/kernels/keops/rbf_kernel.py"}, {"content": "#!/usr/bin/env python3\nimport math\n\nimport torch\n\nfrom ... import settings\nfrom ...lazy import KeOpsLazyTensor\nfrom .keops_kernel import KeOpsKernel\n\ntry:\n from pykeops.torch import LazyTensor as KEOLazyTensor\n\n class MaternKernel(KeOpsKernel):\n \"\"\"\n Implements the Matern kernel using KeOps as a driver for kernel matrix multiplies.\n\n This class can be used as a drop in replacement for gpytorch.kernels.MaternKernel in most cases, and supports\n the same arguments. There are currently a few limitations, for example a lack of batch mode support. 
However,\n most other features like ARD will work.\n \"\"\"\n\n has_lengthscale = True\n\n def __init__(self, nu=2.5, **kwargs):\n if nu not in {0.5, 1.5, 2.5}:\n raise RuntimeError(\"nu expected to be 0.5, 1.5, or 2.5\")\n super(MaternKernel, self).__init__(**kwargs)\n self.nu = nu\n\n def _nonkeops_covar_func(self, x1, x2, diag=False):\n distance = self.covar_dist(x1, x2, diag=diag)\n exp_component = torch.exp(-math.sqrt(self.nu * 2) * distance)\n\n if self.nu == 0.5:\n constant_component = 1\n elif self.nu == 1.5:\n constant_component = (math.sqrt(3) * distance).add(1)\n elif self.nu == 2.5:\n constant_component = (math.sqrt(5) * distance).add(1).add(5.0 / 3.0 * distance ** 2)\n return constant_component * exp_component\n\n def covar_func(self, x1, x2, diag=False):\n # We only should use KeOps on big kernel matrices\n # If we would otherwise be performing Cholesky inference, (or when just computing a kernel matrix diag)\n # then don't apply KeOps\n if (\n diag\n or x1.size(-2) < settings.max_cholesky_size.value()\n or x2.size(-2) < settings.max_cholesky_size.value()\n ):\n return self._nonkeops_covar_func(x1, x2, diag=diag)\n # TODO: x1 / x2 size checks are a work around for a very minor bug in KeOps.\n # This bug is fixed on KeOps master, and we'll remove that part of the check\n # when they cut a new release.\n elif x1.size(-2) == 1 or x2.size(-2) == 1:\n return self._nonkeops_covar_func(x1, x2, diag=diag)\n else:\n with torch.autograd.enable_grad():\n # We only should use KeOps on big kernel matrices\n # If we would otherwise be performing Cholesky inference, then don't apply KeOps\n if (\n x1.size(-2) < settings.max_cholesky_size.value()\n or x2.size(-2) < settings.max_cholesky_size.value()\n ):\n x1_ = x1[..., :, None, :]\n x2_ = x2[..., None, :, :]\n else:\n x1_ = KEOLazyTensor(x1[..., :, None, :])\n x2_ = KEOLazyTensor(x2[..., None, :, :])\n\n distance = ((x1_ - x2_) ** 2).sum(-1).sqrt()\n exp_component = (-math.sqrt(self.nu * 2) * distance).exp()\n\n if self.nu == 0.5:\n constant_component = 1\n elif self.nu == 1.5:\n constant_component = (math.sqrt(3) * distance) + 1\n elif self.nu == 2.5:\n constant_component = (math.sqrt(5) * distance) + (1 + 5.0 / 3.0 * distance ** 2)\n\n return constant_component * exp_component\n\n def forward(self, x1, x2, diag=False, **params):\n mean = x1.reshape(-1, x1.size(-1)).mean(0)[(None,) * (x1.dim() - 1)]\n\n x1_ = (x1 - mean).div(self.lengthscale)\n x2_ = (x2 - mean).div(self.lengthscale)\n\n if diag:\n return self.covar_func(x1_, x2_, diag=True)\n\n covar_func = lambda x1, x2, diag=False: self.covar_func(x1, x2, diag)\n return KeOpsLazyTensor(x1_, x2_, covar_func)\n\n\nexcept ImportError:\n\n class MaternKernel(KeOpsKernel):\n def __init__(self, *args, **kwargs):\n super().__init__()\n", "path": "gpytorch/kernels/keops/matern_kernel.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport torch\n\nfrom ... import settings\nfrom ...lazy import KeOpsLazyTensor\nfrom ..rbf_kernel import postprocess_rbf\nfrom .keops_kernel import KeOpsKernel\n\ntry:\n from pykeops.torch import LazyTensor as KEOLazyTensor\n\n class RBFKernel(KeOpsKernel):\n \"\"\"\n Implements the RBF kernel using KeOps as a driver for kernel matrix multiplies.\n\n This class can be used as a drop in replacement for gpytorch.kernels.RBFKernel in most cases, and supports\n the same arguments. There are currently a few limitations, for example a lack of batch mode support. 
However,\n most other features like ARD will work.\n \"\"\"\n\n has_lengthscale = True\n\n def _nonkeops_covar_func(self, x1, x2, diag=False):\n return self.covar_dist(\n x1,\n x2,\n square_dist=True,\n diag=diag,\n dist_postprocess_func=postprocess_rbf,\n postprocess=True,\n )\n\n def covar_func(self, x1, x2, diag=False):\n # We only should use KeOps on big kernel matrices\n # If we would otherwise be performing Cholesky inference, (or when just computing a kernel matrix diag)\n # then don't apply KeOps\n # enable gradients to ensure that test time caches on small predictions are still\n # backprop-able\n with torch.autograd.enable_grad():\n if (\n diag\n or x1.size(-2) < settings.max_cholesky_size.value()\n or x2.size(-2) < settings.max_cholesky_size.value()\n ):\n return self._nonkeops_covar_func(x1, x2, diag=diag)\n\n x1_ = KEOLazyTensor(x1[..., :, None, :])\n x2_ = KEOLazyTensor(x2[..., None, :, :])\n\n K = (-((x1_ - x2_) ** 2).sum(-1) / 2).exp()\n\n return K\n\n def forward(self, x1, x2, diag=False, **params):\n x1_ = x1.div(self.lengthscale)\n x2_ = x2.div(self.lengthscale)\n\n covar_func = lambda x1, x2, diag=diag: self.covar_func(x1, x2, diag)\n\n if diag:\n return covar_func(x1_, x2_, diag=True)\n\n return KeOpsLazyTensor(x1_, x2_, covar_func)\n\n\nexcept ImportError:\n\n class RBFKernel(KeOpsKernel):\n def __init__(self, *args, **kwargs):\n super().__init__()\n", "path": "gpytorch/kernels/keops/rbf_kernel.py"}, {"content": "#!/usr/bin/env python3\nimport math\n\nimport torch\n\nfrom ... import settings\nfrom ...lazy import KeOpsLazyTensor\nfrom .keops_kernel import KeOpsKernel\n\ntry:\n from pykeops.torch import LazyTensor as KEOLazyTensor\n\n class MaternKernel(KeOpsKernel):\n \"\"\"\n Implements the Matern kernel using KeOps as a driver for kernel matrix multiplies.\n\n This class can be used as a drop in replacement for gpytorch.kernels.MaternKernel in most cases, and supports\n the same arguments. There are currently a few limitations, for example a lack of batch mode support. 
However,\n most other features like ARD will work.\n \"\"\"\n\n has_lengthscale = True\n\n def __init__(self, nu=2.5, **kwargs):\n if nu not in {0.5, 1.5, 2.5}:\n raise RuntimeError(\"nu expected to be 0.5, 1.5, or 2.5\")\n super(MaternKernel, self).__init__(**kwargs)\n self.nu = nu\n\n def _nonkeops_covar_func(self, x1, x2, diag=False):\n distance = self.covar_dist(x1, x2, diag=diag)\n exp_component = torch.exp(-math.sqrt(self.nu * 2) * distance)\n\n if self.nu == 0.5:\n constant_component = 1\n elif self.nu == 1.5:\n constant_component = (math.sqrt(3) * distance).add(1)\n elif self.nu == 2.5:\n constant_component = (math.sqrt(5) * distance).add(1).add(5.0 / 3.0 * distance ** 2)\n return constant_component * exp_component\n\n def covar_func(self, x1, x2, diag=False):\n # We only should use KeOps on big kernel matrices\n # If we would otherwise be performing Cholesky inference, (or when just computing a kernel matrix diag)\n # then don't apply KeOps\n # enable gradients to ensure that test time caches on small predictions are still\n # backprop-able\n with torch.autograd.enable_grad():\n if (\n diag\n or x1.size(-2) < settings.max_cholesky_size.value()\n or x2.size(-2) < settings.max_cholesky_size.value()\n ):\n return self._nonkeops_covar_func(x1, x2, diag=diag)\n # TODO: x1 / x2 size checks are a work around for a very minor bug in KeOps.\n # This bug is fixed on KeOps master, and we'll remove that part of the check\n # when they cut a new release.\n elif x1.size(-2) == 1 or x2.size(-2) == 1:\n return self._nonkeops_covar_func(x1, x2, diag=diag)\n else:\n # We only should use KeOps on big kernel matrices\n # If we would otherwise be performing Cholesky inference, then don't apply KeOps\n if (\n x1.size(-2) < settings.max_cholesky_size.value()\n or x2.size(-2) < settings.max_cholesky_size.value()\n ):\n x1_ = x1[..., :, None, :]\n x2_ = x2[..., None, :, :]\n else:\n x1_ = KEOLazyTensor(x1[..., :, None, :])\n x2_ = KEOLazyTensor(x2[..., None, :, :])\n\n distance = ((x1_ - x2_) ** 2).sum(-1).sqrt()\n exp_component = (-math.sqrt(self.nu * 2) * distance).exp()\n\n if self.nu == 0.5:\n constant_component = 1\n elif self.nu == 1.5:\n constant_component = (math.sqrt(3) * distance) + 1\n elif self.nu == 2.5:\n constant_component = (math.sqrt(5) * distance) + (1 + 5.0 / 3.0 * distance ** 2)\n\n return constant_component * exp_component\n\n def forward(self, x1, x2, diag=False, **params):\n mean = x1.reshape(-1, x1.size(-1)).mean(0)[(None,) * (x1.dim() - 1)]\n\n x1_ = (x1 - mean).div(self.lengthscale)\n x2_ = (x2 - mean).div(self.lengthscale)\n\n if diag:\n return self.covar_func(x1_, x2_, diag=True)\n\n covar_func = lambda x1, x2, diag=False: self.covar_func(x1, x2, diag)\n return KeOpsLazyTensor(x1_, x2_, covar_func)\n\n\nexcept ImportError:\n\n class MaternKernel(KeOpsKernel):\n def __init__(self, *args, **kwargs):\n super().__init__()\n", "path": "gpytorch/kernels/keops/matern_kernel.py"}]}
| 4,010 | 992 |
gh_patches_debug_8533 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-2112 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cherrypy >= 6.1.0 fails tests
From the cherrypy [changelog](https://github.com/cherrypy/cherrypy/blob/master/CHANGES.txt):
```
6.1.0
-----
* Combined wsgiserver2 and wsgiserver3 modules into a
single module, ``cherrypy.wsgiserver``.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/hook-cherrypy.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2015-2016, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9 #
10 # CherryPy is a minimalist Python web framework.
11 #
12 # http://www.cherrypy.org/
13 #
14 # Tested with CherryPy 5.0.1
15
16
17 from PyInstaller.utils.hooks import collect_submodules
18
19
20 hiddenimports = collect_submodules('cherrypy.wsgiserver')
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/PyInstaller/hooks/hook-cherrypy.py b/PyInstaller/hooks/hook-cherrypy.py
deleted file mode 100644
--- a/PyInstaller/hooks/hook-cherrypy.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#-----------------------------------------------------------------------------
-# Copyright (c) 2015-2016, PyInstaller Development Team.
-#
-# Distributed under the terms of the GNU General Public License with exception
-# for distributing bootloader.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-#
-# CherryPy is a minimalist Python web framework.
-#
-# http://www.cherrypy.org/
-#
-# Tested with CherryPy 5.0.1
-
-
-from PyInstaller.utils.hooks import collect_submodules
-
-
-hiddenimports = collect_submodules('cherrypy.wsgiserver')
\ No newline at end of file
|
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-cherrypy.py b/PyInstaller/hooks/hook-cherrypy.py\ndeleted file mode 100644\n--- a/PyInstaller/hooks/hook-cherrypy.py\n+++ /dev/null\n@@ -1,20 +0,0 @@\n-#-----------------------------------------------------------------------------\n-# Copyright (c) 2015-2016, PyInstaller Development Team.\n-#\n-# Distributed under the terms of the GNU General Public License with exception\n-# for distributing bootloader.\n-#\n-# The full license is in the file COPYING.txt, distributed with this software.\n-#-----------------------------------------------------------------------------\n-#\n-# CherryPy is a minimalist Python web framework.\n-#\n-# http://www.cherrypy.org/\n-#\n-# Tested with CherryPy 5.0.1\n-\n-\n-from PyInstaller.utils.hooks import collect_submodules\n-\n-\n-hiddenimports = collect_submodules('cherrypy.wsgiserver')\n\\ No newline at end of file\n", "issue": "cherrypy >= 6.1.0 fails tests\nFrom the cherrypy [changelog](https://github.com/cherrypy/cherrypy/blob/master/CHANGES.txt):\n\n```\n6.1.0\n-----\n\n* Combined wsgiserver2 and wsgiserver3 modules into a\n single module, ``cherrypy.wsgiserver``.\n```\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2015-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n#\n# CherryPy is a minimalist Python web framework.\n#\n# http://www.cherrypy.org/\n#\n# Tested with CherryPy 5.0.1\n\n\nfrom PyInstaller.utils.hooks import collect_submodules\n\n\nhiddenimports = collect_submodules('cherrypy.wsgiserver')", "path": "PyInstaller/hooks/hook-cherrypy.py"}], "after_files": [{"content": null, "path": "PyInstaller/hooks/hook-cherrypy.py"}]}
| 502 | 214 |
gh_patches_debug_38994 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-1123 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[EHN] Add `jointly` option for `min_max_scale`
<!-- Thank you for your PR!
BEFORE YOU CONTINUE! Please add the appropriate three-letter abbreviation to your title.
The abbreviations can be:
- [DOC]: Documentation fixes.
- [ENH]: Code contributions and new features.
- [TST]: Test-related contributions.
- [INF]: Infrastructure-related contributions.
Also, do not forget to tag the relevant issue here as well.
Finally, as commits come in, don't forget to regularly rebase!
-->
# PR Description
Please describe the changes proposed in the pull request:
- Add an option for `min_max_scale` support to transform each column values or entire values
- Default transform each column, similar behavior to [sklearn.preprocessing.MinMaxScaler](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html)
<!-- Doing so provides maintainers with context on what the PR is, and can help us more effectively review your PR. -->
<!-- Please also identify below which issue that has been raised that you are going to close. -->
**This PR resolves #1067.**
<!-- As you go down the PR template, please feel free to delete sections that are irrelevant. -->
# PR Checklist
<!-- This checklist exists for newcomers who are not yet familiar with our requirements. If you are experienced with
the project, please feel free to delete this section. -->
Please ensure that you have done the following:
1. [x] PR in from a fork off your branch. Do not PR from `<your_username>`:`dev`, but rather from `<your_username>`:`<feature-branch_name>`.
<!-- Doing this helps us keep the commit history much cleaner than it would otherwise be. -->
2. [x] If you're not on the contributors list, add yourself to `AUTHORS.md`.
<!-- We'd like to acknowledge your contributions! -->
3. [x] Add a line to `CHANGELOG.md` under the latest version header (i.e. the one that is "on deck") describing the contribution.
- Do use some discretion here; if there are multiple PRs that are related, keep them in a single line.
# Automatic checks
There will be automatic checks run on the PR. These include:
- Building a preview of the docs on Netlify
- Automatically linting the code
- Making sure the code is documented
- Making sure that all tests are passed
- Making sure that code coverage doesn't go down.
# Relevant Reviewers
<!-- Finally, please tag relevant maintainers to review. -->
Please tag maintainers to review.
- @ericmjl
--- END ISSUE ---
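A short pandas-only sketch (no janitor import needed) of the per-column versus joint behaviour the PR describes, reproducing the numbers used in the docstring examples further below:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [0, 1]})

# Per-column (default, mirroring sklearn's MinMaxScaler): each column
# is rescaled with its own min/max.
per_column = (df - df.min()) / (df.max() - df.min())
print(per_column)  # a -> [0.0, 1.0], b -> [0.0, 1.0]

# Jointly: one global min/max shared across the whole frame.
gmin, gmax = df.min().min(), df.max().max()
jointly = (df - gmin) / (gmax - gmin)
print(jointly)     # a -> [0.5, 1.0], b -> [0.0, 0.5]
```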
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `janitor/functions/min_max_scale.py`
Content:
```
1 from __future__ import annotations
2
3 import pandas_flavor as pf
4 import pandas as pd
5
6 from janitor.utils import deprecated_alias
7 from janitor.utils import deprecated_kwargs
8
9
10 @pf.register_dataframe_method
11 @deprecated_kwargs(
12 "old_min",
13 "old_max",
14 "new_min",
15 "new_max",
16 message=(
17 "The keyword argument {argument!r} of {func_name!r} is deprecated. "
18 "Please use 'feature_range' instead."
19 ),
20 )
21 @deprecated_alias(col_name="column_name")
22 def min_max_scale(
23 df: pd.DataFrame,
24 feature_range: tuple[int | float, int | float] = (0, 1),
25 column_name: str | int | list[str | int] | pd.Index = None,
26 jointly: bool = False,
27 ) -> pd.DataFrame:
28 """
29 Scales DataFrame to between a minimum and maximum value.
30
31 One can optionally set a new target **minimum** and **maximum** value
32 using the `feature_range` keyword argument.
33
34 If `column_name` is specified, then only that column(s) of data is scaled.
35 Otherwise, the entire dataframe is scaled.
36 If `jointly` is `True`, the `column_names` provided entire dataframe will
37 be regnozied as the one to jointly scale. Otherwise, each column of data
38 will be scaled separately.
39
40 Example: Basic usage.
41
42 >>> import pandas as pd
43 >>> import janitor
44 >>> df = pd.DataFrame({'a':[1, 2], 'b':[0, 1]})
45 >>> df.min_max_scale()
46 a b
47 0 0.0 0.0
48 1 1.0 1.0
49 >>> df.min_max_scale(jointly=True)
50 a b
51 0 0.5 0.0
52 1 1.0 0.5
53
54 Example: Setting custom minimum and maximum.
55
56 >>> import pandas as pd
57 >>> import janitor
58 >>> df = pd.DataFrame({'a':[1, 2], 'b':[0, 1]})
59 >>> df.min_max_scale(feature_range=(0, 100))
60 a b
61 0 0.0 0.0
62 1 100.0 100.0
63 >>> df.min_max_scale(feature_range=(0, 100), jointly=True)
64 a b
65 0 50.0 0.0
66 1 100.0 50.0
67
68 Example: Apply min-max to the selected columns.
69
70 >>> import pandas as pd
71 >>> import janitor
72 >>> df = pd.DataFrame({'a':[1, 2], 'b':[0, 1], 'c': [1, 0]})
73 >>> df.min_max_scale(
74 ... feature_range=(0, 100),
75 ... column_name=["a", "c"],
76 ... )
77 a b c
78 0 0.0 0 100.0
79 1 100.0 1 0.0
80 >>> df.min_max_scale(
81 ... feature_range=(0, 100),
82 ... column_name=["a", "c"],
83 ... jointly=True,
84 ... )
85 a b c
86 0 50.0 0 50.0
87 1 100.0 1 0.0
88 >>> df.min_max_scale(feature_range=(0, 100), column_name='a')
89 a b c
90 0 0.0 0 1
91 1 100.0 1 0
92
93 The aforementioned example might be applied to something like scaling the
94 isoelectric points of amino acids. While technically they range from
95 approx 3-10, we can also think of them on the pH scale which ranges from
96 1 to 14. Hence, 3 gets scaled not to 0 but approx. 0.15 instead, while 10
97 gets scaled to approx. 0.69 instead.
98
99 :param df: A pandas DataFrame.
100 :param feature_range: (optional) Desired range of transformed data.
101 :param column_name: (optional) The column on which to perform scaling.
102 :param jointly: (bool) Scale the entire data if Ture.
103 :returns: A pandas DataFrame with scaled data.
104 :raises ValueError: if `feature_range` isn't tuple type.
105 :raises ValueError: if the length of `feature_range` isn't equal to two.
106 :raises ValueError: if the element of `feature_range` isn't number type.
107 :raises ValueError: if `feature_range[1]` <= `feature_range[0]`.
108
109 Changed in version 0.24.0: Deleted "old_min", "old_max", "new_min", and
110 "new_max" options.
111 Changed in version 0.24.0: Added "feature_range", and "jointly" options.
112 """
113
114 if not (
115 isinstance(feature_range, (tuple, list))
116 and len(feature_range) == 2
117 and all((isinstance(i, (int, float))) for i in feature_range)
118 and feature_range[1] > feature_range[0]
119 ):
120 raise ValueError(
121 "`feature_range` should be a range type contains number element, "
122 "the first element must be greater than the second one"
123 )
124
125 if column_name is not None:
126 df = df.copy() # Avoid to change the original DataFrame.
127
128 old_feature_range = df[column_name].pipe(min_max_value, jointly)
129 df[column_name] = df[column_name].pipe(
130 apply_min_max,
131 *old_feature_range,
132 *feature_range,
133 )
134 else:
135 old_feature_range = df.pipe(min_max_value, jointly)
136 df = df.pipe(
137 apply_min_max,
138 *old_feature_range,
139 *feature_range,
140 )
141
142 return df
143
144
145 def min_max_value(df: pd.DataFrame, jointly: bool) -> tuple:
146 """
147 Return the minimum and maximum of DataFrame.
148
149 Use the `jointly` flag to control returning entire data or each column.
150
151 .. # noqa: DAR101
152 .. # noqa: DAR201
153 """
154
155 if jointly:
156 mmin = df.min().min()
157 mmax = df.max().max()
158 else:
159 mmin = df.min()
160 mmax = df.max()
161
162 return mmin, mmax
163
164
165 def apply_min_max(
166 df: pd.DataFrame,
167 old_min: int | float | pd.Series,
168 old_max: int | float | pd.Series,
169 new_min: int | float | pd.Series,
170 new_max: int | float | pd.Series,
171 ) -> pd.DataFrame:
172 """
173 Apply minimax scaler to DataFrame.
174
175 Notes
176 -----
177 - Inputting minimum and maximum type
178 - int or float : It will apply minimax to the entire DataFrame.
179 - Series : It will apply minimax to each column.
180
181 .. # noqa: DAR101
182 .. # noqa: DAR201
183 """
184
185 old_range = old_max - old_min
186 new_range = new_max - new_min
187
188 return (df - old_min) * new_range / old_range + new_min
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/janitor/functions/min_max_scale.py b/janitor/functions/min_max_scale.py
--- a/janitor/functions/min_max_scale.py
+++ b/janitor/functions/min_max_scale.py
@@ -96,6 +96,12 @@
1 to 14. Hence, 3 gets scaled not to 0 but approx. 0.15 instead, while 10
gets scaled to approx. 0.69 instead.
+ !!! summary "Version Changed"
+
+ - 0.24.0
+ - Deleted `old_min`, `old_max`, `new_min`, and `new_max` options.
+ - Added `feature_range`, and `jointly` options.
+
:param df: A pandas DataFrame.
:param feature_range: (optional) Desired range of transformed data.
:param column_name: (optional) The column on which to perform scaling.
@@ -105,10 +111,6 @@
:raises ValueError: if the length of `feature_range` isn't equal to two.
:raises ValueError: if the element of `feature_range` isn't number type.
:raises ValueError: if `feature_range[1]` <= `feature_range[0]`.
-
- Changed in version 0.24.0: Deleted "old_min", "old_max", "new_min", and
- "new_max" options.
- Changed in version 0.24.0: Added "feature_range", and "jointly" options.
"""
if not (
@@ -125,16 +127,16 @@
if column_name is not None:
df = df.copy() # Avoid to change the original DataFrame.
- old_feature_range = df[column_name].pipe(min_max_value, jointly)
+ old_feature_range = df[column_name].pipe(_min_max_value, jointly)
df[column_name] = df[column_name].pipe(
- apply_min_max,
+ _apply_min_max,
*old_feature_range,
*feature_range,
)
else:
- old_feature_range = df.pipe(min_max_value, jointly)
+ old_feature_range = df.pipe(_min_max_value, jointly)
df = df.pipe(
- apply_min_max,
+ _apply_min_max,
*old_feature_range,
*feature_range,
)
@@ -142,7 +144,7 @@
return df
-def min_max_value(df: pd.DataFrame, jointly: bool) -> tuple:
+def _min_max_value(df: pd.DataFrame, jointly: bool) -> tuple:
"""
Return the minimum and maximum of DataFrame.
@@ -162,7 +164,7 @@
return mmin, mmax
-def apply_min_max(
+def _apply_min_max(
df: pd.DataFrame,
old_min: int | float | pd.Series,
old_max: int | float | pd.Series,
|
{"golden_diff": "diff --git a/janitor/functions/min_max_scale.py b/janitor/functions/min_max_scale.py\n--- a/janitor/functions/min_max_scale.py\n+++ b/janitor/functions/min_max_scale.py\n@@ -96,6 +96,12 @@\n 1 to 14. Hence, 3 gets scaled not to 0 but approx. 0.15 instead, while 10\r\n gets scaled to approx. 0.69 instead.\r\n \r\n+ !!! summary \"Version Changed\"\r\n+\r\n+ - 0.24.0\r\n+ - Deleted `old_min`, `old_max`, `new_min`, and `new_max` options.\r\n+ - Added `feature_range`, and `jointly` options.\r\n+\r\n :param df: A pandas DataFrame.\r\n :param feature_range: (optional) Desired range of transformed data.\r\n :param column_name: (optional) The column on which to perform scaling.\r\n@@ -105,10 +111,6 @@\n :raises ValueError: if the length of `feature_range` isn't equal to two.\r\n :raises ValueError: if the element of `feature_range` isn't number type.\r\n :raises ValueError: if `feature_range[1]` <= `feature_range[0]`.\r\n-\r\n- Changed in version 0.24.0: Deleted \"old_min\", \"old_max\", \"new_min\", and\r\n- \"new_max\" options.\r\n- Changed in version 0.24.0: Added \"feature_range\", and \"jointly\" options.\r\n \"\"\"\r\n \r\n if not (\r\n@@ -125,16 +127,16 @@\n if column_name is not None:\r\n df = df.copy() # Avoid to change the original DataFrame.\r\n \r\n- old_feature_range = df[column_name].pipe(min_max_value, jointly)\r\n+ old_feature_range = df[column_name].pipe(_min_max_value, jointly)\r\n df[column_name] = df[column_name].pipe(\r\n- apply_min_max,\r\n+ _apply_min_max,\r\n *old_feature_range,\r\n *feature_range,\r\n )\r\n else:\r\n- old_feature_range = df.pipe(min_max_value, jointly)\r\n+ old_feature_range = df.pipe(_min_max_value, jointly)\r\n df = df.pipe(\r\n- apply_min_max,\r\n+ _apply_min_max,\r\n *old_feature_range,\r\n *feature_range,\r\n )\r\n@@ -142,7 +144,7 @@\n return df\r\n \r\n \r\n-def min_max_value(df: pd.DataFrame, jointly: bool) -> tuple:\r\n+def _min_max_value(df: pd.DataFrame, jointly: bool) -> tuple:\r\n \"\"\"\r\n Return the minimum and maximum of DataFrame.\r\n \r\n@@ -162,7 +164,7 @@\n return mmin, mmax\r\n \r\n \r\n-def apply_min_max(\r\n+def _apply_min_max(\r\n df: pd.DataFrame,\r\n old_min: int | float | pd.Series,\r\n old_max: int | float | pd.Series,\n", "issue": "[EHN] Add `jointly` option for `min_max_scale`\n<!-- Thank you for your PR!\r\n\r\nBEFORE YOU CONTINUE! Please add the appropriate three-letter abbreviation to your title.\r\n\r\nThe abbreviations can be:\r\n- [DOC]: Documentation fixes.\r\n- [ENH]: Code contributions and new features.\r\n- [TST]: Test-related contributions.\r\n- [INF]: Infrastructure-related contributions.\r\n\r\nAlso, do not forget to tag the relevant issue here as well.\r\n\r\nFinally, as commits come in, don't forget to regularly rebase!\r\n-->\r\n\r\n# PR Description\r\n\r\nPlease describe the changes proposed in the pull request:\r\n\r\n- Add an option for `min_max_scale` support to transform each column values or entire values\r\n- Default transform each column, similar behavior to [sklearn.preprocessing.MinMaxScaler](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html)\r\n\r\n<!-- Doing so provides maintainers with context on what the PR is, and can help us more effectively review your PR. -->\r\n\r\n<!-- Please also identify below which issue that has been raised that you are going to close. -->\r\n\r\n**This PR resolves #1067.**\r\n\r\n<!-- As you go down the PR template, please feel free to delete sections that are irrelevant. 
-->\r\n\r\n# PR Checklist\r\n\r\n<!-- This checklist exists for newcomers who are not yet familiar with our requirements. If you are experienced with\r\nthe project, please feel free to delete this section. -->\r\n\r\nPlease ensure that you have done the following:\r\n\r\n1. [x] PR in from a fork off your branch. Do not PR from `<your_username>`:`dev`, but rather from `<your_username>`:`<feature-branch_name>`.\r\n<!-- Doing this helps us keep the commit history much cleaner than it would otherwise be. -->\r\n2. [x] If you're not on the contributors list, add yourself to `AUTHORS.md`.\r\n<!-- We'd like to acknowledge your contributions! -->\r\n3. [x] Add a line to `CHANGELOG.md` under the latest version header (i.e. the one that is \"on deck\") describing the contribution.\r\n - Do use some discretion here; if there are multiple PRs that are related, keep them in a single line.\r\n\r\n# Automatic checks\r\n\r\nThere will be automatic checks run on the PR. These include:\r\n\r\n- Building a preview of the docs on Netlify\r\n- Automatically linting the code\r\n- Making sure the code is documented\r\n- Making sure that all tests are passed\r\n- Making sure that code coverage doesn't go down.\r\n\r\n# Relevant Reviewers\r\n\r\n<!-- Finally, please tag relevant maintainers to review. -->\r\n\r\nPlease tag maintainers to review.\r\n\r\n- @ericmjl\r\n\n", "before_files": [{"content": "from __future__ import annotations\r\n\r\nimport pandas_flavor as pf\r\nimport pandas as pd\r\n\r\nfrom janitor.utils import deprecated_alias\r\nfrom janitor.utils import deprecated_kwargs\r\n\r\n\r\[email protected]_dataframe_method\r\n@deprecated_kwargs(\r\n \"old_min\",\r\n \"old_max\",\r\n \"new_min\",\r\n \"new_max\",\r\n message=(\r\n \"The keyword argument {argument!r} of {func_name!r} is deprecated. \"\r\n \"Please use 'feature_range' instead.\"\r\n ),\r\n)\r\n@deprecated_alias(col_name=\"column_name\")\r\ndef min_max_scale(\r\n df: pd.DataFrame,\r\n feature_range: tuple[int | float, int | float] = (0, 1),\r\n column_name: str | int | list[str | int] | pd.Index = None,\r\n jointly: bool = False,\r\n) -> pd.DataFrame:\r\n \"\"\"\r\n Scales DataFrame to between a minimum and maximum value.\r\n\r\n One can optionally set a new target **minimum** and **maximum** value\r\n using the `feature_range` keyword argument.\r\n\r\n If `column_name` is specified, then only that column(s) of data is scaled.\r\n Otherwise, the entire dataframe is scaled.\r\n If `jointly` is `True`, the `column_names` provided entire dataframe will\r\n be regnozied as the one to jointly scale. Otherwise, each column of data\r\n will be scaled separately.\r\n\r\n Example: Basic usage.\r\n\r\n >>> import pandas as pd\r\n >>> import janitor\r\n >>> df = pd.DataFrame({'a':[1, 2], 'b':[0, 1]})\r\n >>> df.min_max_scale()\r\n a b\r\n 0 0.0 0.0\r\n 1 1.0 1.0\r\n >>> df.min_max_scale(jointly=True)\r\n a b\r\n 0 0.5 0.0\r\n 1 1.0 0.5\r\n\r\n Example: Setting custom minimum and maximum.\r\n\r\n >>> import pandas as pd\r\n >>> import janitor\r\n >>> df = pd.DataFrame({'a':[1, 2], 'b':[0, 1]})\r\n >>> df.min_max_scale(feature_range=(0, 100))\r\n a b\r\n 0 0.0 0.0\r\n 1 100.0 100.0\r\n >>> df.min_max_scale(feature_range=(0, 100), jointly=True)\r\n a b\r\n 0 50.0 0.0\r\n 1 100.0 50.0\r\n\r\n Example: Apply min-max to the selected columns.\r\n\r\n >>> import pandas as pd\r\n >>> import janitor\r\n >>> df = pd.DataFrame({'a':[1, 2], 'b':[0, 1], 'c': [1, 0]})\r\n >>> df.min_max_scale(\r\n ... feature_range=(0, 100),\r\n ... column_name=[\"a\", \"c\"],\r\n ... 
)\r\n a b c\r\n 0 0.0 0 100.0\r\n 1 100.0 1 0.0\r\n >>> df.min_max_scale(\r\n ... feature_range=(0, 100),\r\n ... column_name=[\"a\", \"c\"],\r\n ... jointly=True,\r\n ... )\r\n a b c\r\n 0 50.0 0 50.0\r\n 1 100.0 1 0.0\r\n >>> df.min_max_scale(feature_range=(0, 100), column_name='a')\r\n a b c\r\n 0 0.0 0 1\r\n 1 100.0 1 0\r\n\r\n The aforementioned example might be applied to something like scaling the\r\n isoelectric points of amino acids. While technically they range from\r\n approx 3-10, we can also think of them on the pH scale which ranges from\r\n 1 to 14. Hence, 3 gets scaled not to 0 but approx. 0.15 instead, while 10\r\n gets scaled to approx. 0.69 instead.\r\n\r\n :param df: A pandas DataFrame.\r\n :param feature_range: (optional) Desired range of transformed data.\r\n :param column_name: (optional) The column on which to perform scaling.\r\n :param jointly: (bool) Scale the entire data if Ture.\r\n :returns: A pandas DataFrame with scaled data.\r\n :raises ValueError: if `feature_range` isn't tuple type.\r\n :raises ValueError: if the length of `feature_range` isn't equal to two.\r\n :raises ValueError: if the element of `feature_range` isn't number type.\r\n :raises ValueError: if `feature_range[1]` <= `feature_range[0]`.\r\n\r\n Changed in version 0.24.0: Deleted \"old_min\", \"old_max\", \"new_min\", and\r\n \"new_max\" options.\r\n Changed in version 0.24.0: Added \"feature_range\", and \"jointly\" options.\r\n \"\"\"\r\n\r\n if not (\r\n isinstance(feature_range, (tuple, list))\r\n and len(feature_range) == 2\r\n and all((isinstance(i, (int, float))) for i in feature_range)\r\n and feature_range[1] > feature_range[0]\r\n ):\r\n raise ValueError(\r\n \"`feature_range` should be a range type contains number element, \"\r\n \"the first element must be greater than the second one\"\r\n )\r\n\r\n if column_name is not None:\r\n df = df.copy() # Avoid to change the original DataFrame.\r\n\r\n old_feature_range = df[column_name].pipe(min_max_value, jointly)\r\n df[column_name] = df[column_name].pipe(\r\n apply_min_max,\r\n *old_feature_range,\r\n *feature_range,\r\n )\r\n else:\r\n old_feature_range = df.pipe(min_max_value, jointly)\r\n df = df.pipe(\r\n apply_min_max,\r\n *old_feature_range,\r\n *feature_range,\r\n )\r\n\r\n return df\r\n\r\n\r\ndef min_max_value(df: pd.DataFrame, jointly: bool) -> tuple:\r\n \"\"\"\r\n Return the minimum and maximum of DataFrame.\r\n\r\n Use the `jointly` flag to control returning entire data or each column.\r\n\r\n .. # noqa: DAR101\r\n .. # noqa: DAR201\r\n \"\"\"\r\n\r\n if jointly:\r\n mmin = df.min().min()\r\n mmax = df.max().max()\r\n else:\r\n mmin = df.min()\r\n mmax = df.max()\r\n\r\n return mmin, mmax\r\n\r\n\r\ndef apply_min_max(\r\n df: pd.DataFrame,\r\n old_min: int | float | pd.Series,\r\n old_max: int | float | pd.Series,\r\n new_min: int | float | pd.Series,\r\n new_max: int | float | pd.Series,\r\n) -> pd.DataFrame:\r\n \"\"\"\r\n Apply minimax scaler to DataFrame.\r\n\r\n Notes\r\n -----\r\n - Inputting minimum and maximum type\r\n - int or float : It will apply minimax to the entire DataFrame.\r\n - Series : It will apply minimax to each column.\r\n\r\n .. # noqa: DAR101\r\n .. 
# noqa: DAR201\r\n \"\"\"\r\n\r\n old_range = old_max - old_min\r\n new_range = new_max - new_min\r\n\r\n return (df - old_min) * new_range / old_range + new_min\r\n", "path": "janitor/functions/min_max_scale.py"}], "after_files": [{"content": "from __future__ import annotations\r\n\r\nimport pandas_flavor as pf\r\nimport pandas as pd\r\n\r\nfrom janitor.utils import deprecated_alias\r\nfrom janitor.utils import deprecated_kwargs\r\n\r\n\r\[email protected]_dataframe_method\r\n@deprecated_kwargs(\r\n \"old_min\",\r\n \"old_max\",\r\n \"new_min\",\r\n \"new_max\",\r\n message=(\r\n \"The keyword argument {argument!r} of {func_name!r} is deprecated. \"\r\n \"Please use 'feature_range' instead.\"\r\n ),\r\n)\r\n@deprecated_alias(col_name=\"column_name\")\r\ndef min_max_scale(\r\n df: pd.DataFrame,\r\n feature_range: tuple[int | float, int | float] = (0, 1),\r\n column_name: str | int | list[str | int] | pd.Index = None,\r\n jointly: bool = False,\r\n) -> pd.DataFrame:\r\n \"\"\"\r\n Scales DataFrame to between a minimum and maximum value.\r\n\r\n One can optionally set a new target **minimum** and **maximum** value\r\n using the `feature_range` keyword argument.\r\n\r\n If `column_name` is specified, then only that column(s) of data is scaled.\r\n Otherwise, the entire dataframe is scaled.\r\n If `jointly` is `True`, the `column_names` provided entire dataframe will\r\n be regnozied as the one to jointly scale. Otherwise, each column of data\r\n will be scaled separately.\r\n\r\n Example: Basic usage.\r\n\r\n >>> import pandas as pd\r\n >>> import janitor\r\n >>> df = pd.DataFrame({'a':[1, 2], 'b':[0, 1]})\r\n >>> df.min_max_scale()\r\n a b\r\n 0 0.0 0.0\r\n 1 1.0 1.0\r\n >>> df.min_max_scale(jointly=True)\r\n a b\r\n 0 0.5 0.0\r\n 1 1.0 0.5\r\n\r\n Example: Setting custom minimum and maximum.\r\n\r\n >>> import pandas as pd\r\n >>> import janitor\r\n >>> df = pd.DataFrame({'a':[1, 2], 'b':[0, 1]})\r\n >>> df.min_max_scale(feature_range=(0, 100))\r\n a b\r\n 0 0.0 0.0\r\n 1 100.0 100.0\r\n >>> df.min_max_scale(feature_range=(0, 100), jointly=True)\r\n a b\r\n 0 50.0 0.0\r\n 1 100.0 50.0\r\n\r\n Example: Apply min-max to the selected columns.\r\n\r\n >>> import pandas as pd\r\n >>> import janitor\r\n >>> df = pd.DataFrame({'a':[1, 2], 'b':[0, 1], 'c': [1, 0]})\r\n >>> df.min_max_scale(\r\n ... feature_range=(0, 100),\r\n ... column_name=[\"a\", \"c\"],\r\n ... )\r\n a b c\r\n 0 0.0 0 100.0\r\n 1 100.0 1 0.0\r\n >>> df.min_max_scale(\r\n ... feature_range=(0, 100),\r\n ... column_name=[\"a\", \"c\"],\r\n ... jointly=True,\r\n ... )\r\n a b c\r\n 0 50.0 0 50.0\r\n 1 100.0 1 0.0\r\n >>> df.min_max_scale(feature_range=(0, 100), column_name='a')\r\n a b c\r\n 0 0.0 0 1\r\n 1 100.0 1 0\r\n\r\n The aforementioned example might be applied to something like scaling the\r\n isoelectric points of amino acids. While technically they range from\r\n approx 3-10, we can also think of them on the pH scale which ranges from\r\n 1 to 14. Hence, 3 gets scaled not to 0 but approx. 0.15 instead, while 10\r\n gets scaled to approx. 0.69 instead.\r\n\r\n !!! 
summary \"Version Changed\"\r\n\r\n - 0.24.0\r\n - Deleted `old_min`, `old_max`, `new_min`, and `new_max` options.\r\n - Added `feature_range`, and `jointly` options.\r\n\r\n :param df: A pandas DataFrame.\r\n :param feature_range: (optional) Desired range of transformed data.\r\n :param column_name: (optional) The column on which to perform scaling.\r\n :param jointly: (bool) Scale the entire data if Ture.\r\n :returns: A pandas DataFrame with scaled data.\r\n :raises ValueError: if `feature_range` isn't tuple type.\r\n :raises ValueError: if the length of `feature_range` isn't equal to two.\r\n :raises ValueError: if the element of `feature_range` isn't number type.\r\n :raises ValueError: if `feature_range[1]` <= `feature_range[0]`.\r\n \"\"\"\r\n\r\n if not (\r\n isinstance(feature_range, (tuple, list))\r\n and len(feature_range) == 2\r\n and all((isinstance(i, (int, float))) for i in feature_range)\r\n and feature_range[1] > feature_range[0]\r\n ):\r\n raise ValueError(\r\n \"`feature_range` should be a range type contains number element, \"\r\n \"the first element must be greater than the second one\"\r\n )\r\n\r\n if column_name is not None:\r\n df = df.copy() # Avoid to change the original DataFrame.\r\n\r\n old_feature_range = df[column_name].pipe(_min_max_value, jointly)\r\n df[column_name] = df[column_name].pipe(\r\n _apply_min_max,\r\n *old_feature_range,\r\n *feature_range,\r\n )\r\n else:\r\n old_feature_range = df.pipe(_min_max_value, jointly)\r\n df = df.pipe(\r\n _apply_min_max,\r\n *old_feature_range,\r\n *feature_range,\r\n )\r\n\r\n return df\r\n\r\n\r\ndef _min_max_value(df: pd.DataFrame, jointly: bool) -> tuple:\r\n \"\"\"\r\n Return the minimum and maximum of DataFrame.\r\n\r\n Use the `jointly` flag to control returning entire data or each column.\r\n\r\n .. # noqa: DAR101\r\n .. # noqa: DAR201\r\n \"\"\"\r\n\r\n if jointly:\r\n mmin = df.min().min()\r\n mmax = df.max().max()\r\n else:\r\n mmin = df.min()\r\n mmax = df.max()\r\n\r\n return mmin, mmax\r\n\r\n\r\ndef _apply_min_max(\r\n df: pd.DataFrame,\r\n old_min: int | float | pd.Series,\r\n old_max: int | float | pd.Series,\r\n new_min: int | float | pd.Series,\r\n new_max: int | float | pd.Series,\r\n) -> pd.DataFrame:\r\n \"\"\"\r\n Apply minimax scaler to DataFrame.\r\n\r\n Notes\r\n -----\r\n - Inputting minimum and maximum type\r\n - int or float : It will apply minimax to the entire DataFrame.\r\n - Series : It will apply minimax to each column.\r\n\r\n .. # noqa: DAR101\r\n .. # noqa: DAR201\r\n \"\"\"\r\n\r\n old_range = old_max - old_min\r\n new_range = new_max - new_min\r\n\r\n return (df - old_min) * new_range / old_range + new_min\r\n", "path": "janitor/functions/min_max_scale.py"}]}
| 2,942 | 654 |
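The `min_max_scale` record above reduces to the rescaling identity `(x - old_min) * (new_max - new_min) / (old_max - old_min) + new_min`. A minimal standalone sketch of that identity in plain pandas (the frame and column names here are illustrative, not taken from the record):

```python
# Hypothetical illustration of min-max scaling with plain pandas.
import pandas as pd

def rescale(s: pd.Series, feature_range=(0, 1)) -> pd.Series:
    new_min, new_max = feature_range
    old_min, old_max = s.min(), s.max()
    # Shift to start at 0, scale to the new range, then shift to new_min.
    return (s - old_min) * (new_max - new_min) / (old_max - old_min) + new_min

df = pd.DataFrame({"a": [1, 2, 3], "b": [10, 0, 5]})
print(df.apply(rescale))              # each column scaled to [0, 1] independently
print(rescale(df["a"], (0, 100)))     # 0.0, 50.0, 100.0
```

Scaling jointly instead of per column amounts to using the global minimum and maximum (`df.min().min()`, `df.max().max()`) for every column, which is what the record's `jointly=True` branch does.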
gh_patches_debug_25371
|
rasdani/github-patches
|
git_diff
|
vispy__vispy-463
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug when running Vispy offline for the first time
There appears to be a bug when you run Vispy offline and you don't have the freetype thing already downloaded. Not completely sure about the exact conditions responsible for the crash, require some testing...
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vispy/util/fonts/_freetype.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # -----------------------------------------------------------------------------
3 # Copyright (c) 2014, Vispy Development Team. All Rights Reserved.
4 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
5 # -----------------------------------------------------------------------------
6
7 # Use freetype to get glyph bitmaps
8
9 import sys
10 import numpy as np
11
12 from ...ext.freetype import (FT_LOAD_RENDER, FT_LOAD_NO_HINTING,
13 FT_LOAD_NO_AUTOHINT, Face)
14
15
16 # Convert face to filename
17 from ._vispy_fonts import _vispy_fonts, _get_vispy_font_filename
18 if sys.platform.startswith('linux'):
19 from ...ext.fontconfig import find_font
20 elif sys.platform.startswith('win'):
21 from ._win32 import find_font # noqa, analysis:ignore
22 else:
23 raise NotImplementedError
24
25 _font_dict = {}
26
27
28 def _load_font(face, bold, italic):
29 key = '%s-%s-%s' % (face, bold, italic)
30 if key in _font_dict:
31 return _font_dict[key]
32 if face in _vispy_fonts:
33 fname = _get_vispy_font_filename(face, bold, italic)
34 else:
35 fname = find_font(face, bold, italic)
36 font = Face(fname)
37 _font_dict[key] = font
38 return font
39
40
41 def _load_glyph(f, char, glyphs_dict):
42 """Load glyph from font into dict"""
43 flags = FT_LOAD_RENDER | FT_LOAD_NO_HINTING | FT_LOAD_NO_AUTOHINT
44 face = _load_font(f['face'], f['bold'], f['italic'])
45 face.set_char_size(f['size'] * 64)
46 # get the character of interest
47 face.load_char(char, flags)
48 bitmap = face.glyph.bitmap
49 width = face.glyph.bitmap.width
50 height = face.glyph.bitmap.rows
51 bitmap = np.array(bitmap.buffer)
52 w0 = bitmap.size // height if bitmap.size > 0 else 0
53 bitmap.shape = (height, w0)
54 bitmap = bitmap[:, :width].astype(np.ubyte)
55
56 left = face.glyph.bitmap_left
57 top = face.glyph.bitmap_top
58 advance = face.glyph.advance.x / 64.
59 glyph = dict(char=char, offset=(left, top), bitmap=bitmap,
60 advance=advance, kerning={})
61 glyphs_dict[char] = glyph
62 # Generate kerning
63 for other_char, other_glyph in glyphs_dict.items():
64 kerning = face.get_kerning(other_char, char)
65 glyph['kerning'][other_char] = kerning.x / 64.
66 kerning = face.get_kerning(char, other_char)
67 other_glyph['kerning'][char] = kerning.x / 64.
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/vispy/util/fonts/_freetype.py b/vispy/util/fonts/_freetype.py
--- a/vispy/util/fonts/_freetype.py
+++ b/vispy/util/fonts/_freetype.py
@@ -9,9 +9,6 @@
import sys
import numpy as np
-from ...ext.freetype import (FT_LOAD_RENDER, FT_LOAD_NO_HINTING,
- FT_LOAD_NO_AUTOHINT, Face)
-
# Convert face to filename
from ._vispy_fonts import _vispy_fonts, _get_vispy_font_filename
@@ -25,7 +22,11 @@
_font_dict = {}
+# Nest freetype imports in case someone doesn't have freetype on their system
+# and isn't using fonts (Windows)
+
def _load_font(face, bold, italic):
+ from ...ext.freetype import Face
key = '%s-%s-%s' % (face, bold, italic)
if key in _font_dict:
return _font_dict[key]
@@ -40,6 +41,8 @@
def _load_glyph(f, char, glyphs_dict):
"""Load glyph from font into dict"""
+ from ...ext.freetype import (FT_LOAD_RENDER, FT_LOAD_NO_HINTING,
+ FT_LOAD_NO_AUTOHINT)
flags = FT_LOAD_RENDER | FT_LOAD_NO_HINTING | FT_LOAD_NO_AUTOHINT
face = _load_font(f['face'], f['bold'], f['italic'])
face.set_char_size(f['size'] * 64)
|
{"golden_diff": "diff --git a/vispy/util/fonts/_freetype.py b/vispy/util/fonts/_freetype.py\n--- a/vispy/util/fonts/_freetype.py\n+++ b/vispy/util/fonts/_freetype.py\n@@ -9,9 +9,6 @@\n import sys\n import numpy as np\n \n-from ...ext.freetype import (FT_LOAD_RENDER, FT_LOAD_NO_HINTING,\n- FT_LOAD_NO_AUTOHINT, Face)\n-\n \n # Convert face to filename\n from ._vispy_fonts import _vispy_fonts, _get_vispy_font_filename\n@@ -25,7 +22,11 @@\n _font_dict = {}\n \n \n+# Nest freetype imports in case someone doesn't have freetype on their system\n+# and isn't using fonts (Windows)\n+\n def _load_font(face, bold, italic):\n+ from ...ext.freetype import Face\n key = '%s-%s-%s' % (face, bold, italic)\n if key in _font_dict:\n return _font_dict[key]\n@@ -40,6 +41,8 @@\n \n def _load_glyph(f, char, glyphs_dict):\n \"\"\"Load glyph from font into dict\"\"\"\n+ from ...ext.freetype import (FT_LOAD_RENDER, FT_LOAD_NO_HINTING,\n+ FT_LOAD_NO_AUTOHINT)\n flags = FT_LOAD_RENDER | FT_LOAD_NO_HINTING | FT_LOAD_NO_AUTOHINT\n face = _load_font(f['face'], f['bold'], f['italic'])\n face.set_char_size(f['size'] * 64)\n", "issue": "Bug when running Vispy offline for the first time\nThere appears to be a bug when you run Vispy offline and you don't have the freetype thing already downloaded. Not completely sure about the exact conditions responsible for the crash, require some testing...\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\n# Use freetype to get glyph bitmaps\n\nimport sys\nimport numpy as np\n\nfrom ...ext.freetype import (FT_LOAD_RENDER, FT_LOAD_NO_HINTING,\n FT_LOAD_NO_AUTOHINT, Face)\n\n\n# Convert face to filename\nfrom ._vispy_fonts import _vispy_fonts, _get_vispy_font_filename\nif sys.platform.startswith('linux'):\n from ...ext.fontconfig import find_font\nelif sys.platform.startswith('win'):\n from ._win32 import find_font # noqa, analysis:ignore\nelse:\n raise NotImplementedError\n\n_font_dict = {}\n\n\ndef _load_font(face, bold, italic):\n key = '%s-%s-%s' % (face, bold, italic)\n if key in _font_dict:\n return _font_dict[key]\n if face in _vispy_fonts:\n fname = _get_vispy_font_filename(face, bold, italic)\n else:\n fname = find_font(face, bold, italic)\n font = Face(fname)\n _font_dict[key] = font\n return font\n\n\ndef _load_glyph(f, char, glyphs_dict):\n \"\"\"Load glyph from font into dict\"\"\"\n flags = FT_LOAD_RENDER | FT_LOAD_NO_HINTING | FT_LOAD_NO_AUTOHINT\n face = _load_font(f['face'], f['bold'], f['italic'])\n face.set_char_size(f['size'] * 64)\n # get the character of interest\n face.load_char(char, flags)\n bitmap = face.glyph.bitmap\n width = face.glyph.bitmap.width\n height = face.glyph.bitmap.rows\n bitmap = np.array(bitmap.buffer)\n w0 = bitmap.size // height if bitmap.size > 0 else 0\n bitmap.shape = (height, w0)\n bitmap = bitmap[:, :width].astype(np.ubyte)\n\n left = face.glyph.bitmap_left\n top = face.glyph.bitmap_top\n advance = face.glyph.advance.x / 64.\n glyph = dict(char=char, offset=(left, top), bitmap=bitmap,\n advance=advance, kerning={})\n glyphs_dict[char] = glyph\n # Generate kerning\n for other_char, other_glyph in glyphs_dict.items():\n kerning = face.get_kerning(other_char, char)\n glyph['kerning'][other_char] = kerning.x / 64.\n kerning = 
face.get_kerning(char, other_char)\n other_glyph['kerning'][char] = kerning.x / 64.\n", "path": "vispy/util/fonts/_freetype.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\n# Use freetype to get glyph bitmaps\n\nimport sys\nimport numpy as np\n\n\n# Convert face to filename\nfrom ._vispy_fonts import _vispy_fonts, _get_vispy_font_filename\nif sys.platform.startswith('linux'):\n from ...ext.fontconfig import find_font\nelif sys.platform.startswith('win'):\n from ._win32 import find_font # noqa, analysis:ignore\nelse:\n raise NotImplementedError\n\n_font_dict = {}\n\n\n# Nest freetype imports in case someone doesn't have freetype on their system\n# and isn't using fonts (Windows)\n\ndef _load_font(face, bold, italic):\n from ...ext.freetype import Face\n key = '%s-%s-%s' % (face, bold, italic)\n if key in _font_dict:\n return _font_dict[key]\n if face in _vispy_fonts:\n fname = _get_vispy_font_filename(face, bold, italic)\n else:\n fname = find_font(face, bold, italic)\n font = Face(fname)\n _font_dict[key] = font\n return font\n\n\ndef _load_glyph(f, char, glyphs_dict):\n \"\"\"Load glyph from font into dict\"\"\"\n from ...ext.freetype import (FT_LOAD_RENDER, FT_LOAD_NO_HINTING,\n FT_LOAD_NO_AUTOHINT)\n flags = FT_LOAD_RENDER | FT_LOAD_NO_HINTING | FT_LOAD_NO_AUTOHINT\n face = _load_font(f['face'], f['bold'], f['italic'])\n face.set_char_size(f['size'] * 64)\n # get the character of interest\n face.load_char(char, flags)\n bitmap = face.glyph.bitmap\n width = face.glyph.bitmap.width\n height = face.glyph.bitmap.rows\n bitmap = np.array(bitmap.buffer)\n w0 = bitmap.size // height if bitmap.size > 0 else 0\n bitmap.shape = (height, w0)\n bitmap = bitmap[:, :width].astype(np.ubyte)\n\n left = face.glyph.bitmap_left\n top = face.glyph.bitmap_top\n advance = face.glyph.advance.x / 64.\n glyph = dict(char=char, offset=(left, top), bitmap=bitmap,\n advance=advance, kerning={})\n glyphs_dict[char] = glyph\n # Generate kerning\n for other_char, other_glyph in glyphs_dict.items():\n kerning = face.get_kerning(other_char, char)\n glyph['kerning'][other_char] = kerning.x / 64.\n kerning = face.get_kerning(char, other_char)\n other_glyph['kerning'][char] = kerning.x / 64.\n", "path": "vispy/util/fonts/_freetype.py"}]}
| 1,036 | 339 |
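The vispy patch above moves the FreeType imports from module scope into the functions that need them, so importing the fonts module does not require FreeType to be present on systems that never render fonts. A generic sketch of that deferred-import pattern (the backend name, error message, and function are illustrative, not vispy's actual code):

```python
# Sketch of deferring an optional dependency to first use rather than
# importing it at module load time. "freetype" stands for any optional backend.
_backend = None

def _get_backend():
    global _backend
    if _backend is None:
        try:
            import freetype  # attempted only when font rendering is requested
        except ImportError as exc:
            raise RuntimeError(
                "font rendering was requested but the optional FreeType "
                "backend is not installed"
            ) from exc
        _backend = freetype
    return _backend

def load_glyph(char):
    face_cls = _get_backend().Face  # resolved lazily, per the pattern above
    ...
```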
gh_patches_debug_4608
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__pyro-3101
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`poutine.block` does not work in Python 3.10
### Reproducible code
```python
from pyro import poutine
class A:
@poutine.block
def run(self):
return 1
a = A()
a.run() # error
```
This causes the issue https://github.com/pyro-ppl/pyro/issues/3018
[Support for Python 3.10] MCMC example in documentation does not work: `AttributeError: __enter__`
I am following the MCMC example in the documentation: <https://docs.pyro.ai/en/stable/mcmc.html#nuts>
```python
import pyro
import pyro.distributions as dist
from pyro.infer import MCMC, NUTS
import torch
true_coefs = torch.tensor([1., 2., 3.])
data = torch.randn(2000, 3)
dim = 3
labels = dist.Bernoulli(logits=(true_coefs * data).sum(-1)).sample()
def model(data):
coefs_mean = torch.zeros(dim)
coefs = pyro.sample('beta', dist.Normal(coefs_mean, torch.ones(3)))
y = pyro.sample('y', dist.Bernoulli(logits=(coefs * data).sum(-1)), obs=labels)
return y
nuts_kernel = NUTS(model, adapt_step_size=True)
mcmc = MCMC(nuts_kernel, num_samples=500, warmup_steps=300)
mcmc.run(data)
```
But it raises an error:
```
Traceback (most recent call last):
File "/home/ayaka/Projects/test/main.py", line 19, in <module>
mcmc.run(data)
File "/home/ayaka/venv/lib/python3.10/site-packages/pyro/poutine/messenger.py", line 11, in _context_wrap
with context:
AttributeError: __enter__
```
Pyro version: 1.8.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyro/poutine/messenger.py`
Content:
```
1 # Copyright (c) 2017-2019 Uber Technologies, Inc.
2 # SPDX-License-Identifier: Apache-2.0
3
4 from contextlib import contextmanager
5 from functools import partial
6
7 from .runtime import _PYRO_STACK
8
9
10 def _context_wrap(context, fn, *args, **kwargs):
11 with context:
12 return fn(*args, **kwargs)
13
14
15 class _bound_partial(partial):
16 """
17 Converts a (possibly) bound method into a partial function to
18 support class methods as arguments to handlers.
19 """
20
21 def __get__(self, instance, owner):
22 if instance is None:
23 return self
24 return partial(self.func, instance)
25
26
27 def unwrap(fn):
28 """
29 Recursively unwraps poutines.
30 """
31 while True:
32 if isinstance(fn, _bound_partial):
33 fn = fn.func
34 continue
35 if isinstance(fn, partial) and len(fn.args) >= 2:
36 fn = fn.args[1] # extract from partial(handler, fn)
37 continue
38 return fn
39
40
41 class Messenger:
42 """
43 Context manager class that modifies behavior
44 and adds side effects to stochastic functions
45 i.e. callables containing Pyro primitive statements.
46
47 This is the base Messenger class.
48 It implements the default behavior for all Pyro primitives,
49 so that the joint distribution induced by a stochastic function fn
50 is identical to the joint distribution induced by ``Messenger()(fn)``.
51
52 Class of transformers for messages passed during inference.
53 Most inference operations are implemented in subclasses of this.
54 """
55
56 def __call__(self, fn):
57 if not callable(fn):
58 raise ValueError(
59 "{} is not callable, did you mean to pass it as a keyword arg?".format(
60 fn
61 )
62 )
63 wraps = _bound_partial(partial(_context_wrap, self, fn))
64 return wraps
65
66 def __enter__(self):
67 """
68 :returns: self
69 :rtype: pyro.poutine.Messenger
70
71 Installs this messenger at the bottom of the Pyro stack.
72
73 Can be overloaded to add any additional per-call setup functionality,
74 but the derived class must always push itself onto the stack, usually
75 by calling super().__enter__().
76
77 Derived versions cannot be overridden to take arguments
78 and must always return self.
79 """
80 if not (self in _PYRO_STACK):
81 # if this poutine is not already installed,
82 # put it on the bottom of the stack.
83 _PYRO_STACK.append(self)
84
85 # necessary to return self because the return value of __enter__
86 # is bound to VAR in with EXPR as VAR.
87 return self
88 else:
89 # note: currently we raise an error if trying to install a poutine twice.
90 # However, this isn't strictly necessary,
91 # and blocks recursive poutine execution patterns like
92 # like calling self.__call__ inside of self.__call__
93 # or with Handler(...) as p: with p: <BLOCK>
94 # It's hard to imagine use cases for this pattern,
95 # but it could in principle be enabled...
96 raise ValueError("cannot install a Messenger instance twice")
97
98 def __exit__(self, exc_type, exc_value, traceback):
99 """
100 :param exc_type: exception type, e.g. ValueError
101 :param exc_value: exception instance?
102 :param traceback: traceback for exception handling
103 :returns: None
104 :rtype: None
105
106 Removes this messenger from the bottom of the Pyro stack.
107 If an exception is raised, removes this messenger and everything below it.
108 Always called after every execution of self.fn via self.__call__.
109
110 Can be overloaded by derived classes to add any other per-call teardown functionality,
111 but the stack must always be popped by the derived class,
112 usually by calling super().__exit__(*args).
113
114 Derived versions cannot be overridden to take other arguments,
115 and must always return None or False.
116
117 The arguments are the mandatory arguments used by a with statement.
118 Users should never be specifying these.
119 They are all None unless the body of the with statement raised an exception.
120 """
121 if exc_type is None: # callee or enclosed block returned successfully
122 # if the callee or enclosed block returned successfully,
123 # this poutine should be on the bottom of the stack.
124 # If so, remove it from the stack.
125 # if not, raise a ValueError because something really weird happened.
126 if _PYRO_STACK[-1] == self:
127 _PYRO_STACK.pop()
128 else:
129 # should never get here, but just in case...
130 raise ValueError("This Messenger is not on the bottom of the stack")
131 else: # the wrapped function or block raised an exception
132 # poutine exception handling:
133 # when the callee or enclosed block raises an exception,
134 # find this poutine's position in the stack,
135 # then remove it and everything below it in the stack.
136 if self in _PYRO_STACK:
137 loc = _PYRO_STACK.index(self)
138 for i in range(loc, len(_PYRO_STACK)):
139 _PYRO_STACK.pop()
140
141 def _reset(self):
142 pass
143
144 def _process_message(self, msg):
145 """
146 :param msg: current message at a trace site
147 :returns: None
148
149 Process the message by calling appropriate method of itself based
150 on message type. The message is updated in place.
151 """
152 method = getattr(self, "_pyro_{}".format(msg["type"]), None)
153 if method is not None:
154 return method(msg)
155 return None
156
157 def _postprocess_message(self, msg):
158 method = getattr(self, "_pyro_post_{}".format(msg["type"]), None)
159 if method is not None:
160 return method(msg)
161 return None
162
163 @classmethod
164 def register(cls, fn=None, type=None, post=None):
165 """
166 :param fn: function implementing operation
167 :param str type: name of the operation
168 (also passed to :func:`~pyro.poutine.runtime.effectful`)
169 :param bool post: if `True`, use this operation as postprocess
170
171 Dynamically add operations to an effect.
172 Useful for generating wrappers for libraries.
173
174 Example::
175
176 @SomeMessengerClass.register
177 def some_function(msg)
178 ...do_something...
179 return msg
180
181 """
182 if fn is None:
183 return lambda x: cls.register(x, type=type, post=post)
184
185 if type is None:
186 raise ValueError("An operation type name must be provided")
187
188 setattr(cls, "_pyro_" + ("post_" if post else "") + type, staticmethod(fn))
189 return fn
190
191 @classmethod
192 def unregister(cls, fn=None, type=None):
193 """
194 :param fn: function implementing operation
195 :param str type: name of the operation
196 (also passed to :func:`~pyro.poutine.runtime.effectful`)
197
198 Dynamically remove operations from an effect.
199 Useful for removing wrappers from libraries.
200
201 Example::
202
203 SomeMessengerClass.unregister(some_function, "name")
204 """
205 if type is None:
206 raise ValueError("An operation type name must be provided")
207
208 try:
209 delattr(cls, "_pyro_post_" + type)
210 except AttributeError:
211 pass
212
213 try:
214 delattr(cls, "_pyro_" + type)
215 except AttributeError:
216 pass
217
218 return fn
219
220
221 @contextmanager
222 def block_messengers(predicate):
223 """
224 EXPERIMENTAL Context manager to temporarily remove matching messengers from
225 the _PYRO_STACK. Note this does not call the ``.__exit__()`` and
226 ``.__enter__()`` methods.
227
228 This is useful to selectively block enclosing handlers.
229
230 :param callable predicate: A predicate mapping messenger instance to boolean.
231 This mutes all messengers ``m`` for which ``bool(predicate(m)) is True``.
232 :yields: A list of matched messengers that are blocked.
233 """
234 blocked = {}
235 try:
236 for i, messenger in enumerate(_PYRO_STACK):
237 if predicate(messenger):
238 blocked[i] = messenger
239 _PYRO_STACK[i] = Messenger() # trivial messenger
240 yield list(blocked.values())
241 finally:
242 for i, messenger in blocked.items():
243 _PYRO_STACK[i] = messenger
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyro/poutine/messenger.py b/pyro/poutine/messenger.py
--- a/pyro/poutine/messenger.py
+++ b/pyro/poutine/messenger.py
@@ -18,6 +18,14 @@
support class methods as arguments to handlers.
"""
+ # Use '__slots__' for func to avoid the issue
+ # `_bound_partial(_bound_partial(f)).func is f`
+ # in Python 3.10.
+ __slots__ = "func"
+
+ def __init__(self, func):
+ self.func = func
+
def __get__(self, instance, owner):
if instance is None:
return self
|
{"golden_diff": "diff --git a/pyro/poutine/messenger.py b/pyro/poutine/messenger.py\n--- a/pyro/poutine/messenger.py\n+++ b/pyro/poutine/messenger.py\n@@ -18,6 +18,14 @@\n support class methods as arguments to handlers.\n \"\"\"\n \n+ # Use '__slots__' for func to avoid the issue\n+ # `_bound_partial(_bound_partial(f)).func is f`\n+ # in Python 3.10.\n+ __slots__ = \"func\"\n+\n+ def __init__(self, func):\n+ self.func = func\n+\n def __get__(self, instance, owner):\n if instance is None:\n return self\n", "issue": "`poutine.block` does not work in Python 3.10\n### Reproducible code\r\n\r\n```python\r\nfrom pyro import poutine\r\n\r\nclass A:\r\n @poutine.block\r\n def run(self):\r\n return 1\r\n\r\na = A()\r\na.run() # error\r\n```\r\n\r\nThis causes the issue https://github.com/pyro-ppl/pyro/issues/3018\n[Support for Python 3.10] MCMC example in documentation does not work: `AttributeError: __enter__`\nI am following the MCMC example in the documentation: <https://docs.pyro.ai/en/stable/mcmc.html#nuts>\r\n\r\n```python\r\nimport pyro\r\nimport pyro.distributions as dist\r\nfrom pyro.infer import MCMC, NUTS\r\nimport torch\r\n\r\ntrue_coefs = torch.tensor([1., 2., 3.])\r\ndata = torch.randn(2000, 3)\r\ndim = 3\r\nlabels = dist.Bernoulli(logits=(true_coefs * data).sum(-1)).sample()\r\n\r\ndef model(data):\r\n coefs_mean = torch.zeros(dim)\r\n coefs = pyro.sample('beta', dist.Normal(coefs_mean, torch.ones(3)))\r\n y = pyro.sample('y', dist.Bernoulli(logits=(coefs * data).sum(-1)), obs=labels)\r\n return y\r\n\r\nnuts_kernel = NUTS(model, adapt_step_size=True)\r\nmcmc = MCMC(nuts_kernel, num_samples=500, warmup_steps=300)\r\nmcmc.run(data)\r\n```\r\n\r\nBut it raises an error:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/ayaka/Projects/test/main.py\", line 19, in <module>\r\n mcmc.run(data)\r\n File \"/home/ayaka/venv/lib/python3.10/site-packages/pyro/poutine/messenger.py\", line 11, in _context_wrap\r\n with context:\r\nAttributeError: __enter__\r\n```\r\n\r\nPyro version: 1.8.0\n", "before_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom contextlib import contextmanager\nfrom functools import partial\n\nfrom .runtime import _PYRO_STACK\n\n\ndef _context_wrap(context, fn, *args, **kwargs):\n with context:\n return fn(*args, **kwargs)\n\n\nclass _bound_partial(partial):\n \"\"\"\n Converts a (possibly) bound method into a partial function to\n support class methods as arguments to handlers.\n \"\"\"\n\n def __get__(self, instance, owner):\n if instance is None:\n return self\n return partial(self.func, instance)\n\n\ndef unwrap(fn):\n \"\"\"\n Recursively unwraps poutines.\n \"\"\"\n while True:\n if isinstance(fn, _bound_partial):\n fn = fn.func\n continue\n if isinstance(fn, partial) and len(fn.args) >= 2:\n fn = fn.args[1] # extract from partial(handler, fn)\n continue\n return fn\n\n\nclass Messenger:\n \"\"\"\n Context manager class that modifies behavior\n and adds side effects to stochastic functions\n i.e. 
callables containing Pyro primitive statements.\n\n This is the base Messenger class.\n It implements the default behavior for all Pyro primitives,\n so that the joint distribution induced by a stochastic function fn\n is identical to the joint distribution induced by ``Messenger()(fn)``.\n\n Class of transformers for messages passed during inference.\n Most inference operations are implemented in subclasses of this.\n \"\"\"\n\n def __call__(self, fn):\n if not callable(fn):\n raise ValueError(\n \"{} is not callable, did you mean to pass it as a keyword arg?\".format(\n fn\n )\n )\n wraps = _bound_partial(partial(_context_wrap, self, fn))\n return wraps\n\n def __enter__(self):\n \"\"\"\n :returns: self\n :rtype: pyro.poutine.Messenger\n\n Installs this messenger at the bottom of the Pyro stack.\n\n Can be overloaded to add any additional per-call setup functionality,\n but the derived class must always push itself onto the stack, usually\n by calling super().__enter__().\n\n Derived versions cannot be overridden to take arguments\n and must always return self.\n \"\"\"\n if not (self in _PYRO_STACK):\n # if this poutine is not already installed,\n # put it on the bottom of the stack.\n _PYRO_STACK.append(self)\n\n # necessary to return self because the return value of __enter__\n # is bound to VAR in with EXPR as VAR.\n return self\n else:\n # note: currently we raise an error if trying to install a poutine twice.\n # However, this isn't strictly necessary,\n # and blocks recursive poutine execution patterns like\n # like calling self.__call__ inside of self.__call__\n # or with Handler(...) as p: with p: <BLOCK>\n # It's hard to imagine use cases for this pattern,\n # but it could in principle be enabled...\n raise ValueError(\"cannot install a Messenger instance twice\")\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"\n :param exc_type: exception type, e.g. 
ValueError\n :param exc_value: exception instance?\n :param traceback: traceback for exception handling\n :returns: None\n :rtype: None\n\n Removes this messenger from the bottom of the Pyro stack.\n If an exception is raised, removes this messenger and everything below it.\n Always called after every execution of self.fn via self.__call__.\n\n Can be overloaded by derived classes to add any other per-call teardown functionality,\n but the stack must always be popped by the derived class,\n usually by calling super().__exit__(*args).\n\n Derived versions cannot be overridden to take other arguments,\n and must always return None or False.\n\n The arguments are the mandatory arguments used by a with statement.\n Users should never be specifying these.\n They are all None unless the body of the with statement raised an exception.\n \"\"\"\n if exc_type is None: # callee or enclosed block returned successfully\n # if the callee or enclosed block returned successfully,\n # this poutine should be on the bottom of the stack.\n # If so, remove it from the stack.\n # if not, raise a ValueError because something really weird happened.\n if _PYRO_STACK[-1] == self:\n _PYRO_STACK.pop()\n else:\n # should never get here, but just in case...\n raise ValueError(\"This Messenger is not on the bottom of the stack\")\n else: # the wrapped function or block raised an exception\n # poutine exception handling:\n # when the callee or enclosed block raises an exception,\n # find this poutine's position in the stack,\n # then remove it and everything below it in the stack.\n if self in _PYRO_STACK:\n loc = _PYRO_STACK.index(self)\n for i in range(loc, len(_PYRO_STACK)):\n _PYRO_STACK.pop()\n\n def _reset(self):\n pass\n\n def _process_message(self, msg):\n \"\"\"\n :param msg: current message at a trace site\n :returns: None\n\n Process the message by calling appropriate method of itself based\n on message type. 
The message is updated in place.\n \"\"\"\n method = getattr(self, \"_pyro_{}\".format(msg[\"type\"]), None)\n if method is not None:\n return method(msg)\n return None\n\n def _postprocess_message(self, msg):\n method = getattr(self, \"_pyro_post_{}\".format(msg[\"type\"]), None)\n if method is not None:\n return method(msg)\n return None\n\n @classmethod\n def register(cls, fn=None, type=None, post=None):\n \"\"\"\n :param fn: function implementing operation\n :param str type: name of the operation\n (also passed to :func:`~pyro.poutine.runtime.effectful`)\n :param bool post: if `True`, use this operation as postprocess\n\n Dynamically add operations to an effect.\n Useful for generating wrappers for libraries.\n\n Example::\n\n @SomeMessengerClass.register\n def some_function(msg)\n ...do_something...\n return msg\n\n \"\"\"\n if fn is None:\n return lambda x: cls.register(x, type=type, post=post)\n\n if type is None:\n raise ValueError(\"An operation type name must be provided\")\n\n setattr(cls, \"_pyro_\" + (\"post_\" if post else \"\") + type, staticmethod(fn))\n return fn\n\n @classmethod\n def unregister(cls, fn=None, type=None):\n \"\"\"\n :param fn: function implementing operation\n :param str type: name of the operation\n (also passed to :func:`~pyro.poutine.runtime.effectful`)\n\n Dynamically remove operations from an effect.\n Useful for removing wrappers from libraries.\n\n Example::\n\n SomeMessengerClass.unregister(some_function, \"name\")\n \"\"\"\n if type is None:\n raise ValueError(\"An operation type name must be provided\")\n\n try:\n delattr(cls, \"_pyro_post_\" + type)\n except AttributeError:\n pass\n\n try:\n delattr(cls, \"_pyro_\" + type)\n except AttributeError:\n pass\n\n return fn\n\n\n@contextmanager\ndef block_messengers(predicate):\n \"\"\"\n EXPERIMENTAL Context manager to temporarily remove matching messengers from\n the _PYRO_STACK. 
Note this does not call the ``.__exit__()`` and\n ``.__enter__()`` methods.\n\n This is useful to selectively block enclosing handlers.\n\n :param callable predicate: A predicate mapping messenger instance to boolean.\n This mutes all messengers ``m`` for which ``bool(predicate(m)) is True``.\n :yields: A list of matched messengers that are blocked.\n \"\"\"\n blocked = {}\n try:\n for i, messenger in enumerate(_PYRO_STACK):\n if predicate(messenger):\n blocked[i] = messenger\n _PYRO_STACK[i] = Messenger() # trivial messenger\n yield list(blocked.values())\n finally:\n for i, messenger in blocked.items():\n _PYRO_STACK[i] = messenger\n", "path": "pyro/poutine/messenger.py"}], "after_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom contextlib import contextmanager\nfrom functools import partial\n\nfrom .runtime import _PYRO_STACK\n\n\ndef _context_wrap(context, fn, *args, **kwargs):\n with context:\n return fn(*args, **kwargs)\n\n\nclass _bound_partial(partial):\n \"\"\"\n Converts a (possibly) bound method into a partial function to\n support class methods as arguments to handlers.\n \"\"\"\n\n # Use '__slots__' for func to avoid the issue\n # `_bound_partial(_bound_partial(f)).func is f`\n # in Python 3.10.\n __slots__ = \"func\"\n\n def __init__(self, func):\n self.func = func\n\n def __get__(self, instance, owner):\n if instance is None:\n return self\n return partial(self.func, instance)\n\n\ndef unwrap(fn):\n \"\"\"\n Recursively unwraps poutines.\n \"\"\"\n while True:\n if isinstance(fn, _bound_partial):\n fn = fn.func\n continue\n if isinstance(fn, partial) and len(fn.args) >= 2:\n fn = fn.args[1] # extract from partial(handler, fn)\n continue\n return fn\n\n\nclass Messenger:\n \"\"\"\n Context manager class that modifies behavior\n and adds side effects to stochastic functions\n i.e. callables containing Pyro primitive statements.\n\n This is the base Messenger class.\n It implements the default behavior for all Pyro primitives,\n so that the joint distribution induced by a stochastic function fn\n is identical to the joint distribution induced by ``Messenger()(fn)``.\n\n Class of transformers for messages passed during inference.\n Most inference operations are implemented in subclasses of this.\n \"\"\"\n\n def __call__(self, fn):\n if not callable(fn):\n raise ValueError(\n \"{} is not callable, did you mean to pass it as a keyword arg?\".format(\n fn\n )\n )\n wraps = _bound_partial(partial(_context_wrap, self, fn))\n return wraps\n\n def __enter__(self):\n \"\"\"\n :returns: self\n :rtype: pyro.poutine.Messenger\n\n Installs this messenger at the bottom of the Pyro stack.\n\n Can be overloaded to add any additional per-call setup functionality,\n but the derived class must always push itself onto the stack, usually\n by calling super().__enter__().\n\n Derived versions cannot be overridden to take arguments\n and must always return self.\n \"\"\"\n if not (self in _PYRO_STACK):\n # if this poutine is not already installed,\n # put it on the bottom of the stack.\n _PYRO_STACK.append(self)\n\n # necessary to return self because the return value of __enter__\n # is bound to VAR in with EXPR as VAR.\n return self\n else:\n # note: currently we raise an error if trying to install a poutine twice.\n # However, this isn't strictly necessary,\n # and blocks recursive poutine execution patterns like\n # like calling self.__call__ inside of self.__call__\n # or with Handler(...) 
as p: with p: <BLOCK>\n # It's hard to imagine use cases for this pattern,\n # but it could in principle be enabled...\n raise ValueError(\"cannot install a Messenger instance twice\")\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"\n :param exc_type: exception type, e.g. ValueError\n :param exc_value: exception instance?\n :param traceback: traceback for exception handling\n :returns: None\n :rtype: None\n\n Removes this messenger from the bottom of the Pyro stack.\n If an exception is raised, removes this messenger and everything below it.\n Always called after every execution of self.fn via self.__call__.\n\n Can be overloaded by derived classes to add any other per-call teardown functionality,\n but the stack must always be popped by the derived class,\n usually by calling super().__exit__(*args).\n\n Derived versions cannot be overridden to take other arguments,\n and must always return None or False.\n\n The arguments are the mandatory arguments used by a with statement.\n Users should never be specifying these.\n They are all None unless the body of the with statement raised an exception.\n \"\"\"\n if exc_type is None: # callee or enclosed block returned successfully\n # if the callee or enclosed block returned successfully,\n # this poutine should be on the bottom of the stack.\n # If so, remove it from the stack.\n # if not, raise a ValueError because something really weird happened.\n if _PYRO_STACK[-1] == self:\n _PYRO_STACK.pop()\n else:\n # should never get here, but just in case...\n raise ValueError(\"This Messenger is not on the bottom of the stack\")\n else: # the wrapped function or block raised an exception\n # poutine exception handling:\n # when the callee or enclosed block raises an exception,\n # find this poutine's position in the stack,\n # then remove it and everything below it in the stack.\n if self in _PYRO_STACK:\n loc = _PYRO_STACK.index(self)\n for i in range(loc, len(_PYRO_STACK)):\n _PYRO_STACK.pop()\n\n def _reset(self):\n pass\n\n def _process_message(self, msg):\n \"\"\"\n :param msg: current message at a trace site\n :returns: None\n\n Process the message by calling appropriate method of itself based\n on message type. 
The message is updated in place.\n \"\"\"\n method = getattr(self, \"_pyro_{}\".format(msg[\"type\"]), None)\n if method is not None:\n return method(msg)\n return None\n\n def _postprocess_message(self, msg):\n method = getattr(self, \"_pyro_post_{}\".format(msg[\"type\"]), None)\n if method is not None:\n return method(msg)\n return None\n\n @classmethod\n def register(cls, fn=None, type=None, post=None):\n \"\"\"\n :param fn: function implementing operation\n :param str type: name of the operation\n (also passed to :func:`~pyro.poutine.runtime.effectful`)\n :param bool post: if `True`, use this operation as postprocess\n\n Dynamically add operations to an effect.\n Useful for generating wrappers for libraries.\n\n Example::\n\n @SomeMessengerClass.register\n def some_function(msg)\n ...do_something...\n return msg\n\n \"\"\"\n if fn is None:\n return lambda x: cls.register(x, type=type, post=post)\n\n if type is None:\n raise ValueError(\"An operation type name must be provided\")\n\n setattr(cls, \"_pyro_\" + (\"post_\" if post else \"\") + type, staticmethod(fn))\n return fn\n\n @classmethod\n def unregister(cls, fn=None, type=None):\n \"\"\"\n :param fn: function implementing operation\n :param str type: name of the operation\n (also passed to :func:`~pyro.poutine.runtime.effectful`)\n\n Dynamically remove operations from an effect.\n Useful for removing wrappers from libraries.\n\n Example::\n\n SomeMessengerClass.unregister(some_function, \"name\")\n \"\"\"\n if type is None:\n raise ValueError(\"An operation type name must be provided\")\n\n try:\n delattr(cls, \"_pyro_post_\" + type)\n except AttributeError:\n pass\n\n try:\n delattr(cls, \"_pyro_\" + type)\n except AttributeError:\n pass\n\n return fn\n\n\n@contextmanager\ndef block_messengers(predicate):\n \"\"\"\n EXPERIMENTAL Context manager to temporarily remove matching messengers from\n the _PYRO_STACK. Note this does not call the ``.__exit__()`` and\n ``.__enter__()`` methods.\n\n This is useful to selectively block enclosing handlers.\n\n :param callable predicate: A predicate mapping messenger instance to boolean.\n This mutes all messengers ``m`` for which ``bool(predicate(m)) is True``.\n :yields: A list of matched messengers that are blocked.\n \"\"\"\n blocked = {}\n try:\n for i, messenger in enumerate(_PYRO_STACK):\n if predicate(messenger):\n blocked[i] = messenger\n _PYRO_STACK[i] = Messenger() # trivial messenger\n yield list(blocked.values())\n finally:\n for i, messenger in blocked.items():\n _PYRO_STACK[i] = messenger\n", "path": "pyro/poutine/messenger.py"}]}
| 3,128 | 156 |
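The pyro patch above keeps `_bound_partial` usable as a method decorator on Python 3.10 by giving the `functools.partial` subclass an explicit `func` slot. The essential pattern — a partial subclass that also implements the descriptor protocol so a wrapped function still binds to `self` — can be sketched as follows (the `logged` decorator and the class `A` are illustrative, not pyro code):

```python
# Minimal sketch of a functools.partial subclass that binds like a method,
# mirroring the shape of the patched _bound_partial.
from functools import partial

class bound_partial(partial):
    # Explicit slot so .func always refers to the callable stored here,
    # as in the patch above.
    __slots__ = "func"

    def __init__(self, func):
        self.func = func

    def __get__(self, instance, owner):
        if instance is None:
            return self
        return partial(self.func, instance)  # bind the instance as first argument

def logged(fn):
    def wrapper(*args, **kwargs):
        print("calling", fn.__name__)
        return fn(*args, **kwargs)
    return bound_partial(wrapper)

class A:
    @logged
    def run(self):
        return 1

assert A().run() == 1  # works because __get__ supplies the instance
```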
gh_patches_debug_8387
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-1352
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
apm-agent-python and structlog - mapper_parsing_exception for `event.dataset`
**Describe the bug**: When using `structlog` and with `elasticapm` and the log processer `elasticapm.structlog_processor`, we have recently seen Logstash refuse our logger with the following error:
```
[2021-10-05T12:10:10,746][WARN ][logstash.outputs.elasticsearch][main][a2a92c7c0ddf765b1969e7e8d4a302b6deca976af4c80a2d9706ccdf2486267b] Could not index event to Elasticsearch. {:status=>400, :action=>["index", {:_id=>nil, :_index=>"logstash-2021.10.05", :routing=>nil}, {"stage_environment"=>"prod", "service.name"=>"flow", "company"=>"<PayerCompany: Hidden Company Name [Parent Company: Hidden Company Name [CRN: DKXXXXXXXX]] [CRN: DKXXXXXXXX]>", "@version"=>"1", "host"=>"167.71.1.240", "sentry"=>"skipped", "timestamp"=>"2021-10-05T12:10:00.483890Z", "logger"=>"account_service.models", "event.dataset"=>"flow", "event"=>"PayerCompany change state request", "level"=>"debug", "port"=>58652, "new"=>"APPROVED", "override"=>false, "@timestamp"=>2021-10-05T12:10:10.462Z, "old"=>"APPROVED", "modline"=>"account_service.models:159"}], :response=>{"index"=>{"_index"=>"logstash-2021.10.05", "_type"=>"_doc", "_id"=>"ST1cUHwBFM723LU2e_JV", "status"=>400, "error"=>{"type"=>"mapper_parsing_exception", "reason"=>"Could not dynamically add mapping for field [event.dataset]. Existing mapping for [event] must be of type object but found [text]."}}}}
```
We setup our structlog like this in the Django settings file:
```python
processors = [
structlog.stdlib.filter_by_level,
structlog.stdlib.add_log_level,
structlog.stdlib.add_logger_name,
log_processors.add_module_and_lineno,
log_processors.normalize_datatypes,
log_processors.attach_environment,
log_processors.timestamper,
structlog_processor, # this is the processor that seems to cause the error
SentryJsonProcessor(level=logging.ERROR, tag_keys="__all__"),
structlog.stdlib.PositionalArgumentsFormatter(),
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
structlog.processors.UnicodeDecoder(),
structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
]
# Structlog
structlog.configure(
processors=processors,
context_class=structlog.threadlocal.wrap_dict(dict),
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)
```
If we remove `structlog_processor` all loggers are received by Logstash with no problems.
If we write a small customer log processor that strips the `event.dataset` (e.g. `del event_dict["event.dataset"]` it also works fine again.
**To Reproduce**
1. Setup structlog
2. Add the elasticapm.structlog_processor to the list of processors for structlog
3. Send a logger (e.g. `logger.info("test")`).
**Environment (please complete the following information)**
- OS: Debian (slim)
- Python version: 3.9.7
- Framework and version [e.g. Django 2.1]: 3.2.8
- APM Server version: 7.15
- Agent version: N/A
**Additional context**
We use `python-logstash-async` for delivery of the logs to the logstash server.
requirements.txt (excerpts)
```pip
elastic-apm==6.5.0
elasticsearch==7.15.0
django-structlog==2.1.3
python-logstash-async==2.3.0
structlog==21.1.0
structlog-sentry==1.4.0
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/handlers/structlog.py`
Content:
```
1 # Copyright (c) 2019, Elasticsearch BV
2 # All rights reserved.
3 #
4 # Redistribution and use in source and binary forms, with or without
5 # modification, are permitted provided that the following conditions are met:
6 #
7 # * Redistributions of source code must retain the above copyright notice, this
8 # list of conditions and the following disclaimer.
9 #
10 # * Redistributions in binary form must reproduce the above copyright notice,
11 # this list of conditions and the following disclaimer in the documentation
12 # and/or other materials provided with the distribution.
13 #
14 # * Neither the name of the copyright holder nor the names of its
15 # contributors may be used to endorse or promote products derived from
16 # this software without specific prior written permission.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27
28
29 from __future__ import absolute_import
30
31 from elasticapm import get_client
32 from elasticapm.traces import execution_context
33
34
35 def structlog_processor(logger, method_name, event_dict):
36 """
37 Add three new entries to the event_dict for any processed events:
38
39 * transaction.id
40 * trace.id
41 * span.id
42
43 Only adds non-None IDs.
44
45 :param logger:
46 Unused (logger instance in structlog)
47 :param method_name:
48 Unused (wrapped method_name)
49 :param event_dict:
50 Event dictionary for the event we're processing
51 :return:
52 `event_dict`, with three new entries.
53 """
54 transaction = execution_context.get_transaction()
55 if transaction:
56 event_dict["transaction.id"] = transaction.id
57 client = get_client()
58 if client:
59 event_dict["service.name"] = client.config.service_name
60 event_dict["event.dataset"] = f"{client.config.service_name}"
61 if transaction and transaction.trace_parent:
62 event_dict["trace.id"] = transaction.trace_parent.trace_id
63 span = execution_context.get_span()
64 if span and span.id:
65 event_dict["span.id"] = span.id
66 return event_dict
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/elasticapm/handlers/structlog.py b/elasticapm/handlers/structlog.py
--- a/elasticapm/handlers/structlog.py
+++ b/elasticapm/handlers/structlog.py
@@ -57,7 +57,7 @@
client = get_client()
if client:
event_dict["service.name"] = client.config.service_name
- event_dict["event.dataset"] = f"{client.config.service_name}"
+ event_dict["event"] = {"dataset": f"{client.config.service_name}"}
if transaction and transaction.trace_parent:
event_dict["trace.id"] = transaction.trace_parent.trace_id
span = execution_context.get_span()
|
{"golden_diff": "diff --git a/elasticapm/handlers/structlog.py b/elasticapm/handlers/structlog.py\n--- a/elasticapm/handlers/structlog.py\n+++ b/elasticapm/handlers/structlog.py\n@@ -57,7 +57,7 @@\n client = get_client()\n if client:\n event_dict[\"service.name\"] = client.config.service_name\n- event_dict[\"event.dataset\"] = f\"{client.config.service_name}\"\n+ event_dict[\"event\"] = {\"dataset\": f\"{client.config.service_name}\"}\n if transaction and transaction.trace_parent:\n event_dict[\"trace.id\"] = transaction.trace_parent.trace_id\n span = execution_context.get_span()\n", "issue": "apm-agent-python and structlog - mapper_parsing_exception for `event.dataset` \n**Describe the bug**: When using `structlog` and with `elasticapm` and the log processer `elasticapm.structlog_processor`, we have recently seen Logstash refuse our logger with the following error: \r\n\r\n```\r\n[2021-10-05T12:10:10,746][WARN ][logstash.outputs.elasticsearch][main][a2a92c7c0ddf765b1969e7e8d4a302b6deca976af4c80a2d9706ccdf2486267b] Could not index event to Elasticsearch. {:status=>400, :action=>[\"index\", {:_id=>nil, :_index=>\"logstash-2021.10.05\", :routing=>nil}, {\"stage_environment\"=>\"prod\", \"service.name\"=>\"flow\", \"company\"=>\"<PayerCompany: Hidden Company Name [Parent Company: Hidden Company Name [CRN: DKXXXXXXXX]] [CRN: DKXXXXXXXX]>\", \"@version\"=>\"1\", \"host\"=>\"167.71.1.240\", \"sentry\"=>\"skipped\", \"timestamp\"=>\"2021-10-05T12:10:00.483890Z\", \"logger\"=>\"account_service.models\", \"event.dataset\"=>\"flow\", \"event\"=>\"PayerCompany change state request\", \"level\"=>\"debug\", \"port\"=>58652, \"new\"=>\"APPROVED\", \"override\"=>false, \"@timestamp\"=>2021-10-05T12:10:10.462Z, \"old\"=>\"APPROVED\", \"modline\"=>\"account_service.models:159\"}], :response=>{\"index\"=>{\"_index\"=>\"logstash-2021.10.05\", \"_type\"=>\"_doc\", \"_id\"=>\"ST1cUHwBFM723LU2e_JV\", \"status\"=>400, \"error\"=>{\"type\"=>\"mapper_parsing_exception\", \"reason\"=>\"Could not dynamically add mapping for field [event.dataset]. Existing mapping for [event] must be of type object but found [text].\"}}}}\r\n```\r\n\r\nWe setup our structlog like this in the Django settings file: \r\n\r\n```python\r\nprocessors = [\r\n structlog.stdlib.filter_by_level,\r\n structlog.stdlib.add_log_level,\r\n structlog.stdlib.add_logger_name,\r\n log_processors.add_module_and_lineno,\r\n log_processors.normalize_datatypes,\r\n log_processors.attach_environment,\r\n log_processors.timestamper,\r\n structlog_processor, # this is the processor that seems to cause the error\r\n SentryJsonProcessor(level=logging.ERROR, tag_keys=\"__all__\"),\r\n structlog.stdlib.PositionalArgumentsFormatter(),\r\n structlog.processors.StackInfoRenderer(),\r\n structlog.processors.format_exc_info,\r\n structlog.processors.UnicodeDecoder(),\r\n structlog.stdlib.ProcessorFormatter.wrap_for_formatter,\r\n]\r\n\r\n\r\n# Structlog\r\nstructlog.configure(\r\n processors=processors,\r\n context_class=structlog.threadlocal.wrap_dict(dict),\r\n logger_factory=structlog.stdlib.LoggerFactory(),\r\n wrapper_class=structlog.stdlib.BoundLogger,\r\n cache_logger_on_first_use=True,\r\n)\r\n```\r\n\r\nIf we remove `structlog_processor` all loggers are received by Logstash with no problems. \r\n\r\nIf we write a small customer log processor that strips the `event.dataset` (e.g. `del event_dict[\"event.dataset\"]` it also works fine again. \r\n\r\n**To Reproduce**\r\n\r\n1. Setup structlog\r\n2. 
Add the elasticapm.structlog_processor to the list of processors for structlog\r\n3. Send a logger (e.g. `logger.info(\"test\")`). \r\n\r\n**Environment (please complete the following information)**\r\n- OS: Debian (slim)\r\n- Python version: 3.9.7\r\n- Framework and version [e.g. Django 2.1]: 3.2.8\r\n- APM Server version: 7.15\r\n- Agent version: N/A\r\n\r\n\r\n**Additional context**\r\n\r\nWe use `python-logstash-async` for delivery of the logs to the logstash server. \r\n\r\nrequirements.txt (excerpts)\r\n```pip\r\nelastic-apm==6.5.0\r\nelasticsearch==7.15.0\r\ndjango-structlog==2.1.3\r\npython-logstash-async==2.3.0\r\nstructlog==21.1.0\r\nstructlog-sentry==1.4.0\r\n``` \n", "before_files": [{"content": "# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nfrom __future__ import absolute_import\n\nfrom elasticapm import get_client\nfrom elasticapm.traces import execution_context\n\n\ndef structlog_processor(logger, method_name, event_dict):\n \"\"\"\n Add three new entries to the event_dict for any processed events:\n\n * transaction.id\n * trace.id\n * span.id\n\n Only adds non-None IDs.\n\n :param logger:\n Unused (logger instance in structlog)\n :param method_name:\n Unused (wrapped method_name)\n :param event_dict:\n Event dictionary for the event we're processing\n :return:\n `event_dict`, with three new entries.\n \"\"\"\n transaction = execution_context.get_transaction()\n if transaction:\n event_dict[\"transaction.id\"] = transaction.id\n client = get_client()\n if client:\n event_dict[\"service.name\"] = client.config.service_name\n event_dict[\"event.dataset\"] = f\"{client.config.service_name}\"\n if transaction and transaction.trace_parent:\n event_dict[\"trace.id\"] = transaction.trace_parent.trace_id\n span = execution_context.get_span()\n if span and span.id:\n event_dict[\"span.id\"] = span.id\n return event_dict\n", "path": "elasticapm/handlers/structlog.py"}], "after_files": [{"content": "# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted 
provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nfrom __future__ import absolute_import\n\nfrom elasticapm import get_client\nfrom elasticapm.traces import execution_context\n\n\ndef structlog_processor(logger, method_name, event_dict):\n \"\"\"\n Add three new entries to the event_dict for any processed events:\n\n * transaction.id\n * trace.id\n * span.id\n\n Only adds non-None IDs.\n\n :param logger:\n Unused (logger instance in structlog)\n :param method_name:\n Unused (wrapped method_name)\n :param event_dict:\n Event dictionary for the event we're processing\n :return:\n `event_dict`, with three new entries.\n \"\"\"\n transaction = execution_context.get_transaction()\n if transaction:\n event_dict[\"transaction.id\"] = transaction.id\n client = get_client()\n if client:\n event_dict[\"service.name\"] = client.config.service_name\n event_dict[\"event\"] = {\"dataset\": f\"{client.config.service_name}\"}\n if transaction and transaction.trace_parent:\n event_dict[\"trace.id\"] = transaction.trace_parent.trace_id\n span = execution_context.get_span()\n if span and span.id:\n event_dict[\"span.id\"] = span.id\n return event_dict\n", "path": "elasticapm/handlers/structlog.py"}]}
| 1,962 | 156 |
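The elastic-apm patch above replaces the dotted top-level key `event.dataset` with a nested `event` object. The mapping error quoted in the issue arises because Elasticsearch would have to treat `event` both as text (the structlog message) and as an object (the parent of `dataset`). The two document shapes can be compared directly; a small sketch, with field values taken from the log line quoted in the issue:

```python
# Dotted key vs. nested object: only the second shape lets Elasticsearch
# map "event" consistently as an object.
import json

flat = {
    "event": "PayerCompany change state request",  # structlog message (text)
    "event.dataset": "flow",                        # implies "event" is an object
}
nested = {
    "event": {"dataset": "flow"},                   # shape produced by the patched processor
}

print(json.dumps(flat, indent=2))
print(json.dumps(nested, indent=2))
```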
gh_patches_debug_13102
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-2637
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
conan search <reference> for recipe without binary packages
In case there is a recipe without binary packages in the local cache, `conan search` with a full reference just prints
`There are no packages for pattern '<ref>'`
- "pattern" is wrong, should be "reference"
- Something like "Package recipe found" would be nice.
- The same command with a suffixed "*" gives more information, although the request is less specific.
This happens mainly when `conan create` fails between export and package. I was confused about the inconsistent outputs with pattern and reference.
Steps to reproduce:
```
> conan search zlib*
There are no packages matching the zlib* pattern
> conan info zlib/1.2.11@conan/stable
...downloads the recipe
> conan search zlib*
Existing package recipes:
zlib/1.2.11@conan/stable
> conan search zlib/1.2.11@conan/stable*
Existing package recipes:
zlib/1.2.11@conan/stable
> conan search zlib/1.2.11@conan/stable
There are no packages for pattern 'zlib/1.2.11@conan/stable'
```
Conan 1.0.1, Windows
- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [x] I've specified the Conan version, operating system version and any tool that can be relevant.
- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/printer.py`
Content:
```
1 from collections import OrderedDict
2
3 from conans.paths import SimplePaths
4
5 from conans.client.output import Color
6 from conans.model.ref import ConanFileReference
7 from conans.model.ref import PackageReference
8 from conans.client.installer import build_id
9 import fnmatch
10
11
12 class Printer(object):
13 """ Print some specific information """
14
15 INDENT_COLOR = {0: Color.BRIGHT_CYAN,
16 1: Color.BRIGHT_RED,
17 2: Color.BRIGHT_GREEN,
18 3: Color.BRIGHT_YELLOW,
19 4: Color.BRIGHT_MAGENTA}
20
21 INDENT_SPACES = 4
22
23 def __init__(self, out):
24 self._out = out
25
26 def print_graph(self, deps_graph, registry):
27 """ Simple pretty printing of a deps graph, can be improved
28 with options, info like licenses, etc
29 """
30 self._out.writeln("Requirements", Color.BRIGHT_YELLOW)
31 for node in sorted(deps_graph.nodes):
32 ref, _ = node
33 if not ref:
34 continue
35 remote = registry.get_ref(ref)
36 from_text = "from local cache" if not remote else "from '%s'" % remote.name
37 self._out.writeln(" %s %s" % (repr(ref), from_text), Color.BRIGHT_CYAN)
38 self._out.writeln("Packages", Color.BRIGHT_YELLOW)
39 for node in sorted(deps_graph.nodes):
40 ref, conanfile = node
41 if not ref:
42 continue
43 ref = PackageReference(ref, conanfile.info.package_id())
44 self._out.writeln(" %s" % repr(ref), Color.BRIGHT_CYAN)
45 self._out.writeln("")
46
47 def _print_paths(self, ref, conan, path_resolver, show):
48 if isinstance(ref, ConanFileReference):
49 if show("export_folder"):
50 path = path_resolver.export(ref)
51 self._out.writeln(" export_folder: %s" % path, Color.BRIGHT_GREEN)
52 if show("source_folder"):
53 path = path_resolver.source(ref, conan.short_paths)
54 self._out.writeln(" source_folder: %s" % path, Color.BRIGHT_GREEN)
55 if show("build_folder") and isinstance(path_resolver, SimplePaths):
56 # @todo: check if this is correct or if it must always be package_id()
57 bid = build_id(conan)
58 if not bid:
59 bid = conan.info.package_id()
60 path = path_resolver.build(PackageReference(ref, bid), conan.short_paths)
61 self._out.writeln(" build_folder: %s" % path, Color.BRIGHT_GREEN)
62 if show("package_folder") and isinstance(path_resolver, SimplePaths):
63 id_ = conan.info.package_id()
64 path = path_resolver.package(PackageReference(ref, id_), conan.short_paths)
65 self._out.writeln(" package_folder: %s" % path, Color.BRIGHT_GREEN)
66
67 def print_info(self, deps_graph, project_reference, _info, registry, graph_updates_info=None,
68 remote=None, node_times=None, path_resolver=None, package_filter=None,
69 show_paths=False):
70 """ Print the dependency information for a conan file
71
72 Attributes:
73 deps_graph: the dependency graph of conan file references to print
74 placeholder_reference: the conan file reference that represents the conan
75 file for a project on the path. This may be None,
76 in which case the project itself will not be part
77 of the printed dependencies.
78 remote: Remote specified in install command.
79 Could be different from the registry one.
80 """
81 if _info is None: # No filter
82 def show(_):
83 return True
84 else:
85 _info_lower = [s.lower() for s in _info]
86
87 def show(field):
88 return field in _info_lower
89
90 graph_updates_info = graph_updates_info or {}
91 for node in sorted(deps_graph.nodes):
92 ref, conan = node
93 if not ref:
94 # ref is only None iff info is being printed for a project directory, and
95 # not a passed in reference
96 if project_reference is None:
97 continue
98 else:
99 ref = project_reference
100 if package_filter and not fnmatch.fnmatch(str(ref), package_filter):
101 continue
102
103 self._out.writeln("%s" % str(ref), Color.BRIGHT_CYAN)
104 reg_remote = registry.get_ref(ref)
105 # Excludes PROJECT fake reference
106 remote_name = remote
107 if reg_remote and not remote:
108 remote_name = reg_remote.name
109
110 if show("id"):
111 id_ = conan.info.package_id()
112 self._out.writeln(" ID: %s" % id_, Color.BRIGHT_GREEN)
113 if show("build_id"):
114 bid = build_id(conan)
115 self._out.writeln(" BuildID: %s" % bid, Color.BRIGHT_GREEN)
116
117 if show_paths:
118 self._print_paths(ref, conan, path_resolver, show)
119
120 if isinstance(ref, ConanFileReference) and show("remote"):
121 if reg_remote:
122 self._out.writeln(" Remote: %s=%s" % (reg_remote.name, reg_remote.url),
123 Color.BRIGHT_GREEN)
124 else:
125 self._out.writeln(" Remote: None", Color.BRIGHT_GREEN)
126 url = getattr(conan, "url", None)
127 license_ = getattr(conan, "license", None)
128 author = getattr(conan, "author", None)
129 if url and show("url"):
130 self._out.writeln(" URL: %s" % url, Color.BRIGHT_GREEN)
131
132 if license_ and show("license"):
133 if isinstance(license_, (list, tuple, set)):
134 self._out.writeln(" Licenses: %s" % ", ".join(license_), Color.BRIGHT_GREEN)
135 else:
136 self._out.writeln(" License: %s" % license_, Color.BRIGHT_GREEN)
137 if author and show("author"):
138 self._out.writeln(" Author: %s" % author, Color.BRIGHT_GREEN)
139
140 if isinstance(ref, ConanFileReference) and show("update"): # Excludes PROJECT
141 update = graph_updates_info.get(ref)
142 update_messages = {
143 None: ("Version not checked", Color.WHITE),
144 0: ("You have the latest version (%s)" % remote_name, Color.BRIGHT_GREEN),
145 1: ("There is a newer version (%s)" % remote_name, Color.BRIGHT_YELLOW),
146 -1: ("The local file is newer than remote's one (%s)" % remote_name,
147 Color.BRIGHT_RED)
148 }
149 self._out.writeln(" Updates: %s" % update_messages[update][0],
150 update_messages[update][1])
151
152 if node_times and node_times.get(ref, None) and show("date"):
153 self._out.writeln(" Creation date: %s" % node_times.get(ref, None),
154 Color.BRIGHT_GREEN)
155
156 dependants = deps_graph.inverse_neighbors(node)
157 if isinstance(ref, ConanFileReference) and show("required"): # Excludes
158 self._out.writeln(" Required by:", Color.BRIGHT_GREEN)
159 for d in dependants:
160 ref = d.conan_ref if d.conan_ref else project_reference
161 self._out.writeln(" %s" % str(ref), Color.BRIGHT_YELLOW)
162
163 if show("requires"):
164 depends = deps_graph.neighbors(node)
165 if depends:
166 self._out.writeln(" Requires:", Color.BRIGHT_GREEN)
167 for d in depends:
168 self._out.writeln(" %s" % repr(d.conan_ref), Color.BRIGHT_YELLOW)
169
170 def print_search_recipes(self, references, pattern, raw):
171 """ Print all the exported conans information
172 param pattern: wildcards, e.g., "opencv/*"
173 """
174 if not references and not raw:
175 warn_msg = "There are no packages"
176 pattern_msg = " matching the '%s' pattern" % pattern
177 self._out.info(warn_msg + pattern_msg if pattern else warn_msg)
178 return
179
180 if not raw:
181 self._out.info("Existing package recipes:\n")
182 if isinstance(references, dict):
183 for remote, refs in references.items():
184 self._out.highlight("Remote '%s':" % str(remote))
185 for conan_ref in sorted(refs):
186 self._print_colored_line(str(conan_ref), indent=0)
187 else:
188 for conan_ref in sorted(references):
189 self._print_colored_line(str(conan_ref), indent=0)
190 else:
191 if isinstance(references, dict):
192 for remote, refs in references.items():
193 self._out.writeln("Remote '%s':" % str(remote))
194 for conan_ref in sorted(refs):
195 self._out.writeln(str(conan_ref))
196 else:
197 self._out.writeln("\n".join([str(ref) for ref in references]))
198
199 def print_search_packages(self, packages_props, reference, recipe_hash, packages_query):
200 if not packages_props:
201 if packages_query:
202 warn_msg = "There are no packages for reference '%s' matching the query '%s'" % (str(reference),
203 packages_query)
204 else:
205 warn_msg = "There are no packages for pattern '%s'" % str(reference)
206 self._out.info(warn_msg)
207 return
208
209 self._out.info("Existing packages for recipe %s:\n" % str(reference))
210 # Each package
211 for package_id, properties in sorted(packages_props.items()):
212 self._print_colored_line("Package_ID", package_id, 1)
213 for section in ("options", "settings", "full_requires"):
214 attrs = properties.get(section, [])
215 if attrs:
216 section_name = {"full_requires": "requires"}.get(section, section)
217 self._print_colored_line("[%s]" % section_name, indent=2)
218 if isinstance(attrs, dict): # options, settings
219 attrs = OrderedDict(sorted(attrs.items()))
220 for key, value in attrs.items():
221 self._print_colored_line(key, value=value, indent=3)
222 elif isinstance(attrs, list): # full requires
223 for key in sorted(attrs):
224 self._print_colored_line(key, indent=3)
225 package_recipe_hash = properties.get("recipe_hash", None)
226 # Always compare outdated with local recipe, simplification,
227 # if a remote check is needed install recipe first
228 if recipe_hash:
229 self._print_colored_line("Outdated from recipe: %s" % (recipe_hash != package_recipe_hash), indent=2)
230 self._out.writeln("")
231
232 def print_profile(self, name, profile):
233 self._out.info("Configuration for profile %s:\n" % name)
234 self._print_profile_section("settings", profile.settings.items(), separator="=")
235 self._print_profile_section("options", profile.options.as_list(), separator="=")
236 self._print_profile_section("build_requires", [(key, ", ".join(str(val) for val in values))
237 for key, values in
238 profile.build_requires.items()])
239
240 envs = []
241 for package, env_vars in profile.env_values.data.items():
242 for name, value in env_vars.items():
243 key = "%s:%s" % (package, name) if package else name
244 envs.append((key, value))
245 self._print_profile_section("env", envs, separator='=')
246
247 def _print_profile_section(self, name, items, indent=0, separator=": "):
248 self._print_colored_line("[%s]" % name, indent=indent, color=Color.BRIGHT_RED)
249 for key, value in items:
250 self._print_colored_line(key, value=str(value), indent=0, separator=separator)
251
252 def _print_colored_line(self, text, value=None, indent=0, separator=": ", color=None):
253 """ Print a colored line depending on its indentation level
254 Attributes:
255 text: string line
256 split_symbol: if you want an output with different in-line colors
257 indent_plus: integer to add a plus indentation
258 """
259 text = text.strip()
260 if not text:
261 return
262
263 text_color = Printer.INDENT_COLOR.get(indent, Color.BRIGHT_WHITE) if not color else color
264 indent_text = ' ' * Printer.INDENT_SPACES * indent
265 if value is not None:
266 value_color = Color.BRIGHT_WHITE
267 self._out.write('%s%s%s' % (indent_text, text, separator), text_color)
268 self._out.writeln(value, value_color)
269 else:
270 self._out.writeln('%s%s' % (indent_text, text), text_color)
271
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conans/client/printer.py b/conans/client/printer.py
--- a/conans/client/printer.py
+++ b/conans/client/printer.py
@@ -202,7 +202,11 @@
warn_msg = "There are no packages for reference '%s' matching the query '%s'" % (str(reference),
packages_query)
else:
- warn_msg = "There are no packages for pattern '%s'" % str(reference)
+ warn_msg = "There are no packages for reference '%s'" % str(reference)
+
+ if recipe_hash:
+ warn_msg += ", but package recipe found."
+
self._out.info(warn_msg)
return
|
{"golden_diff": "diff --git a/conans/client/printer.py b/conans/client/printer.py\n--- a/conans/client/printer.py\n+++ b/conans/client/printer.py\n@@ -202,7 +202,11 @@\n warn_msg = \"There are no packages for reference '%s' matching the query '%s'\" % (str(reference),\n packages_query)\n else:\n- warn_msg = \"There are no packages for pattern '%s'\" % str(reference)\n+ warn_msg = \"There are no packages for reference '%s'\" % str(reference)\n+\n+ if recipe_hash:\n+ warn_msg += \", but package recipe found.\"\n+\n self._out.info(warn_msg)\n return\n", "issue": "conan search <reference> for recipe without binary packages\nIn case there is a recipe without binary packages in the local cache, `conan search` with a full reference just prints \r\n`There are no packages for pattern '<ref>'`\r\n- \"pattern\" is wrong, should be \"reference\"\r\n- Something like \"Package recipe found\" would be nice.\r\n- The same command with a suffixed \"*\" gives more information, although the request is less specific.\r\n\r\nThis happens mainly when `conan create` fails between export and package. I was confused about the inconsistent outputs with pattern and reference.\r\n\r\nSteps to reproduce:\r\n```\r\n> conan search zlib*\r\nThere are no packages matching the zlib* pattern\r\n\r\n> conan info zlib/1.2.11@conan/stable\r\n...downloads the recipe\r\n\r\n> conan search zlib*\r\nExisting package recipes:\r\nzlib/1.2.11@conan/stable\r\n\r\n> conan search zlib/1.2.11@conan/stable*\r\nExisting package recipes:\r\nzlib/1.2.11@conan/stable\r\n\r\n> conan search zlib/1.2.11@conan/stable\r\nThere are no packages for pattern 'zlib/1.2.11@conan/stable'\r\n```\r\n\r\nConan 1.0.1, Windows \r\n- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).\r\n- [x] I've specified the Conan version, operating system version and any tool that can be relevant.\r\n- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.\r\n\r\n\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom conans.paths import SimplePaths\n\nfrom conans.client.output import Color\nfrom conans.model.ref import ConanFileReference\nfrom conans.model.ref import PackageReference\nfrom conans.client.installer import build_id\nimport fnmatch\n\n\nclass Printer(object):\n \"\"\" Print some specific information \"\"\"\n\n INDENT_COLOR = {0: Color.BRIGHT_CYAN,\n 1: Color.BRIGHT_RED,\n 2: Color.BRIGHT_GREEN,\n 3: Color.BRIGHT_YELLOW,\n 4: Color.BRIGHT_MAGENTA}\n\n INDENT_SPACES = 4\n\n def __init__(self, out):\n self._out = out\n\n def print_graph(self, deps_graph, registry):\n \"\"\" Simple pretty printing of a deps graph, can be improved\n with options, info like licenses, etc\n \"\"\"\n self._out.writeln(\"Requirements\", Color.BRIGHT_YELLOW)\n for node in sorted(deps_graph.nodes):\n ref, _ = node\n if not ref:\n continue\n remote = registry.get_ref(ref)\n from_text = \"from local cache\" if not remote else \"from '%s'\" % remote.name\n self._out.writeln(\" %s %s\" % (repr(ref), from_text), Color.BRIGHT_CYAN)\n self._out.writeln(\"Packages\", Color.BRIGHT_YELLOW)\n for node in sorted(deps_graph.nodes):\n ref, conanfile = node\n if not ref:\n continue\n ref = PackageReference(ref, conanfile.info.package_id())\n self._out.writeln(\" %s\" % repr(ref), Color.BRIGHT_CYAN)\n self._out.writeln(\"\")\n\n def _print_paths(self, ref, conan, path_resolver, show):\n if isinstance(ref, ConanFileReference):\n if show(\"export_folder\"):\n 
path = path_resolver.export(ref)\n self._out.writeln(\" export_folder: %s\" % path, Color.BRIGHT_GREEN)\n if show(\"source_folder\"):\n path = path_resolver.source(ref, conan.short_paths)\n self._out.writeln(\" source_folder: %s\" % path, Color.BRIGHT_GREEN)\n if show(\"build_folder\") and isinstance(path_resolver, SimplePaths):\n # @todo: check if this is correct or if it must always be package_id()\n bid = build_id(conan)\n if not bid:\n bid = conan.info.package_id()\n path = path_resolver.build(PackageReference(ref, bid), conan.short_paths)\n self._out.writeln(\" build_folder: %s\" % path, Color.BRIGHT_GREEN)\n if show(\"package_folder\") and isinstance(path_resolver, SimplePaths):\n id_ = conan.info.package_id()\n path = path_resolver.package(PackageReference(ref, id_), conan.short_paths)\n self._out.writeln(\" package_folder: %s\" % path, Color.BRIGHT_GREEN)\n\n def print_info(self, deps_graph, project_reference, _info, registry, graph_updates_info=None,\n remote=None, node_times=None, path_resolver=None, package_filter=None,\n show_paths=False):\n \"\"\" Print the dependency information for a conan file\n\n Attributes:\n deps_graph: the dependency graph of conan file references to print\n placeholder_reference: the conan file reference that represents the conan\n file for a project on the path. This may be None,\n in which case the project itself will not be part\n of the printed dependencies.\n remote: Remote specified in install command.\n Could be different from the registry one.\n \"\"\"\n if _info is None: # No filter\n def show(_):\n return True\n else:\n _info_lower = [s.lower() for s in _info]\n\n def show(field):\n return field in _info_lower\n\n graph_updates_info = graph_updates_info or {}\n for node in sorted(deps_graph.nodes):\n ref, conan = node\n if not ref:\n # ref is only None iff info is being printed for a project directory, and\n # not a passed in reference\n if project_reference is None:\n continue\n else:\n ref = project_reference\n if package_filter and not fnmatch.fnmatch(str(ref), package_filter):\n continue\n\n self._out.writeln(\"%s\" % str(ref), Color.BRIGHT_CYAN)\n reg_remote = registry.get_ref(ref)\n # Excludes PROJECT fake reference\n remote_name = remote\n if reg_remote and not remote:\n remote_name = reg_remote.name\n\n if show(\"id\"):\n id_ = conan.info.package_id()\n self._out.writeln(\" ID: %s\" % id_, Color.BRIGHT_GREEN)\n if show(\"build_id\"):\n bid = build_id(conan)\n self._out.writeln(\" BuildID: %s\" % bid, Color.BRIGHT_GREEN)\n\n if show_paths:\n self._print_paths(ref, conan, path_resolver, show)\n\n if isinstance(ref, ConanFileReference) and show(\"remote\"):\n if reg_remote:\n self._out.writeln(\" Remote: %s=%s\" % (reg_remote.name, reg_remote.url),\n Color.BRIGHT_GREEN)\n else:\n self._out.writeln(\" Remote: None\", Color.BRIGHT_GREEN)\n url = getattr(conan, \"url\", None)\n license_ = getattr(conan, \"license\", None)\n author = getattr(conan, \"author\", None)\n if url and show(\"url\"):\n self._out.writeln(\" URL: %s\" % url, Color.BRIGHT_GREEN)\n\n if license_ and show(\"license\"):\n if isinstance(license_, (list, tuple, set)):\n self._out.writeln(\" Licenses: %s\" % \", \".join(license_), Color.BRIGHT_GREEN)\n else:\n self._out.writeln(\" License: %s\" % license_, Color.BRIGHT_GREEN)\n if author and show(\"author\"):\n self._out.writeln(\" Author: %s\" % author, Color.BRIGHT_GREEN)\n\n if isinstance(ref, ConanFileReference) and show(\"update\"): # Excludes PROJECT\n update = graph_updates_info.get(ref)\n update_messages = {\n None: 
(\"Version not checked\", Color.WHITE),\n 0: (\"You have the latest version (%s)\" % remote_name, Color.BRIGHT_GREEN),\n 1: (\"There is a newer version (%s)\" % remote_name, Color.BRIGHT_YELLOW),\n -1: (\"The local file is newer than remote's one (%s)\" % remote_name,\n Color.BRIGHT_RED)\n }\n self._out.writeln(\" Updates: %s\" % update_messages[update][0],\n update_messages[update][1])\n\n if node_times and node_times.get(ref, None) and show(\"date\"):\n self._out.writeln(\" Creation date: %s\" % node_times.get(ref, None),\n Color.BRIGHT_GREEN)\n\n dependants = deps_graph.inverse_neighbors(node)\n if isinstance(ref, ConanFileReference) and show(\"required\"): # Excludes\n self._out.writeln(\" Required by:\", Color.BRIGHT_GREEN)\n for d in dependants:\n ref = d.conan_ref if d.conan_ref else project_reference\n self._out.writeln(\" %s\" % str(ref), Color.BRIGHT_YELLOW)\n\n if show(\"requires\"):\n depends = deps_graph.neighbors(node)\n if depends:\n self._out.writeln(\" Requires:\", Color.BRIGHT_GREEN)\n for d in depends:\n self._out.writeln(\" %s\" % repr(d.conan_ref), Color.BRIGHT_YELLOW)\n\n def print_search_recipes(self, references, pattern, raw):\n \"\"\" Print all the exported conans information\n param pattern: wildcards, e.g., \"opencv/*\"\n \"\"\"\n if not references and not raw:\n warn_msg = \"There are no packages\"\n pattern_msg = \" matching the '%s' pattern\" % pattern\n self._out.info(warn_msg + pattern_msg if pattern else warn_msg)\n return\n\n if not raw:\n self._out.info(\"Existing package recipes:\\n\")\n if isinstance(references, dict):\n for remote, refs in references.items():\n self._out.highlight(\"Remote '%s':\" % str(remote))\n for conan_ref in sorted(refs):\n self._print_colored_line(str(conan_ref), indent=0)\n else:\n for conan_ref in sorted(references):\n self._print_colored_line(str(conan_ref), indent=0)\n else:\n if isinstance(references, dict):\n for remote, refs in references.items():\n self._out.writeln(\"Remote '%s':\" % str(remote))\n for conan_ref in sorted(refs):\n self._out.writeln(str(conan_ref))\n else:\n self._out.writeln(\"\\n\".join([str(ref) for ref in references]))\n\n def print_search_packages(self, packages_props, reference, recipe_hash, packages_query):\n if not packages_props:\n if packages_query:\n warn_msg = \"There are no packages for reference '%s' matching the query '%s'\" % (str(reference),\n packages_query)\n else:\n warn_msg = \"There are no packages for pattern '%s'\" % str(reference)\n self._out.info(warn_msg)\n return\n\n self._out.info(\"Existing packages for recipe %s:\\n\" % str(reference))\n # Each package\n for package_id, properties in sorted(packages_props.items()):\n self._print_colored_line(\"Package_ID\", package_id, 1)\n for section in (\"options\", \"settings\", \"full_requires\"):\n attrs = properties.get(section, [])\n if attrs:\n section_name = {\"full_requires\": \"requires\"}.get(section, section)\n self._print_colored_line(\"[%s]\" % section_name, indent=2)\n if isinstance(attrs, dict): # options, settings\n attrs = OrderedDict(sorted(attrs.items()))\n for key, value in attrs.items():\n self._print_colored_line(key, value=value, indent=3)\n elif isinstance(attrs, list): # full requires\n for key in sorted(attrs):\n self._print_colored_line(key, indent=3)\n package_recipe_hash = properties.get(\"recipe_hash\", None)\n # Always compare outdated with local recipe, simplification,\n # if a remote check is needed install recipe first\n if recipe_hash:\n self._print_colored_line(\"Outdated from recipe: %s\" % 
(recipe_hash != package_recipe_hash), indent=2)\n self._out.writeln(\"\")\n\n def print_profile(self, name, profile):\n self._out.info(\"Configuration for profile %s:\\n\" % name)\n self._print_profile_section(\"settings\", profile.settings.items(), separator=\"=\")\n self._print_profile_section(\"options\", profile.options.as_list(), separator=\"=\")\n self._print_profile_section(\"build_requires\", [(key, \", \".join(str(val) for val in values))\n for key, values in\n profile.build_requires.items()])\n\n envs = []\n for package, env_vars in profile.env_values.data.items():\n for name, value in env_vars.items():\n key = \"%s:%s\" % (package, name) if package else name\n envs.append((key, value))\n self._print_profile_section(\"env\", envs, separator='=')\n\n def _print_profile_section(self, name, items, indent=0, separator=\": \"):\n self._print_colored_line(\"[%s]\" % name, indent=indent, color=Color.BRIGHT_RED)\n for key, value in items:\n self._print_colored_line(key, value=str(value), indent=0, separator=separator)\n\n def _print_colored_line(self, text, value=None, indent=0, separator=\": \", color=None):\n \"\"\" Print a colored line depending on its indentation level\n Attributes:\n text: string line\n split_symbol: if you want an output with different in-line colors\n indent_plus: integer to add a plus indentation\n \"\"\"\n text = text.strip()\n if not text:\n return\n\n text_color = Printer.INDENT_COLOR.get(indent, Color.BRIGHT_WHITE) if not color else color\n indent_text = ' ' * Printer.INDENT_SPACES * indent\n if value is not None:\n value_color = Color.BRIGHT_WHITE\n self._out.write('%s%s%s' % (indent_text, text, separator), text_color)\n self._out.writeln(value, value_color)\n else:\n self._out.writeln('%s%s' % (indent_text, text), text_color)\n", "path": "conans/client/printer.py"}], "after_files": [{"content": "from collections import OrderedDict\n\nfrom conans.paths import SimplePaths\n\nfrom conans.client.output import Color\nfrom conans.model.ref import ConanFileReference\nfrom conans.model.ref import PackageReference\nfrom conans.client.installer import build_id\nimport fnmatch\n\n\nclass Printer(object):\n \"\"\" Print some specific information \"\"\"\n\n INDENT_COLOR = {0: Color.BRIGHT_CYAN,\n 1: Color.BRIGHT_RED,\n 2: Color.BRIGHT_GREEN,\n 3: Color.BRIGHT_YELLOW,\n 4: Color.BRIGHT_MAGENTA}\n\n INDENT_SPACES = 4\n\n def __init__(self, out):\n self._out = out\n\n def print_graph(self, deps_graph, registry):\n \"\"\" Simple pretty printing of a deps graph, can be improved\n with options, info like licenses, etc\n \"\"\"\n self._out.writeln(\"Requirements\", Color.BRIGHT_YELLOW)\n for node in sorted(deps_graph.nodes):\n ref, _ = node\n if not ref:\n continue\n remote = registry.get_ref(ref)\n from_text = \"from local cache\" if not remote else \"from '%s'\" % remote.name\n self._out.writeln(\" %s %s\" % (repr(ref), from_text), Color.BRIGHT_CYAN)\n self._out.writeln(\"Packages\", Color.BRIGHT_YELLOW)\n for node in sorted(deps_graph.nodes):\n ref, conanfile = node\n if not ref:\n continue\n ref = PackageReference(ref, conanfile.info.package_id())\n self._out.writeln(\" %s\" % repr(ref), Color.BRIGHT_CYAN)\n self._out.writeln(\"\")\n\n def _print_paths(self, ref, conan, path_resolver, show):\n if isinstance(ref, ConanFileReference):\n if show(\"export_folder\"):\n path = path_resolver.export(ref)\n self._out.writeln(\" export_folder: %s\" % path, Color.BRIGHT_GREEN)\n if show(\"source_folder\"):\n path = path_resolver.source(ref, conan.short_paths)\n self._out.writeln(\" 
source_folder: %s\" % path, Color.BRIGHT_GREEN)\n if show(\"build_folder\") and isinstance(path_resolver, SimplePaths):\n # @todo: check if this is correct or if it must always be package_id()\n bid = build_id(conan)\n if not bid:\n bid = conan.info.package_id()\n path = path_resolver.build(PackageReference(ref, bid), conan.short_paths)\n self._out.writeln(\" build_folder: %s\" % path, Color.BRIGHT_GREEN)\n if show(\"package_folder\") and isinstance(path_resolver, SimplePaths):\n id_ = conan.info.package_id()\n path = path_resolver.package(PackageReference(ref, id_), conan.short_paths)\n self._out.writeln(\" package_folder: %s\" % path, Color.BRIGHT_GREEN)\n\n def print_info(self, deps_graph, project_reference, _info, registry, graph_updates_info=None,\n remote=None, node_times=None, path_resolver=None, package_filter=None,\n show_paths=False):\n \"\"\" Print the dependency information for a conan file\n\n Attributes:\n deps_graph: the dependency graph of conan file references to print\n placeholder_reference: the conan file reference that represents the conan\n file for a project on the path. This may be None,\n in which case the project itself will not be part\n of the printed dependencies.\n remote: Remote specified in install command.\n Could be different from the registry one.\n \"\"\"\n if _info is None: # No filter\n def show(_):\n return True\n else:\n _info_lower = [s.lower() for s in _info]\n\n def show(field):\n return field in _info_lower\n\n graph_updates_info = graph_updates_info or {}\n for node in sorted(deps_graph.nodes):\n ref, conan = node\n if not ref:\n # ref is only None iff info is being printed for a project directory, and\n # not a passed in reference\n if project_reference is None:\n continue\n else:\n ref = project_reference\n if package_filter and not fnmatch.fnmatch(str(ref), package_filter):\n continue\n\n self._out.writeln(\"%s\" % str(ref), Color.BRIGHT_CYAN)\n reg_remote = registry.get_ref(ref)\n # Excludes PROJECT fake reference\n remote_name = remote\n if reg_remote and not remote:\n remote_name = reg_remote.name\n\n if show(\"id\"):\n id_ = conan.info.package_id()\n self._out.writeln(\" ID: %s\" % id_, Color.BRIGHT_GREEN)\n if show(\"build_id\"):\n bid = build_id(conan)\n self._out.writeln(\" BuildID: %s\" % bid, Color.BRIGHT_GREEN)\n\n if show_paths:\n self._print_paths(ref, conan, path_resolver, show)\n\n if isinstance(ref, ConanFileReference) and show(\"remote\"):\n if reg_remote:\n self._out.writeln(\" Remote: %s=%s\" % (reg_remote.name, reg_remote.url),\n Color.BRIGHT_GREEN)\n else:\n self._out.writeln(\" Remote: None\", Color.BRIGHT_GREEN)\n url = getattr(conan, \"url\", None)\n license_ = getattr(conan, \"license\", None)\n author = getattr(conan, \"author\", None)\n if url and show(\"url\"):\n self._out.writeln(\" URL: %s\" % url, Color.BRIGHT_GREEN)\n\n if license_ and show(\"license\"):\n if isinstance(license_, (list, tuple, set)):\n self._out.writeln(\" Licenses: %s\" % \", \".join(license_), Color.BRIGHT_GREEN)\n else:\n self._out.writeln(\" License: %s\" % license_, Color.BRIGHT_GREEN)\n if author and show(\"author\"):\n self._out.writeln(\" Author: %s\" % author, Color.BRIGHT_GREEN)\n\n if isinstance(ref, ConanFileReference) and show(\"update\"): # Excludes PROJECT\n update = graph_updates_info.get(ref)\n update_messages = {\n None: (\"Version not checked\", Color.WHITE),\n 0: (\"You have the latest version (%s)\" % remote_name, Color.BRIGHT_GREEN),\n 1: (\"There is a newer version (%s)\" % remote_name, Color.BRIGHT_YELLOW),\n -1: (\"The 
local file is newer than remote's one (%s)\" % remote_name,\n Color.BRIGHT_RED)\n }\n self._out.writeln(\" Updates: %s\" % update_messages[update][0],\n update_messages[update][1])\n\n if node_times and node_times.get(ref, None) and show(\"date\"):\n self._out.writeln(\" Creation date: %s\" % node_times.get(ref, None),\n Color.BRIGHT_GREEN)\n\n dependants = deps_graph.inverse_neighbors(node)\n if isinstance(ref, ConanFileReference) and show(\"required\"): # Excludes\n self._out.writeln(\" Required by:\", Color.BRIGHT_GREEN)\n for d in dependants:\n ref = d.conan_ref if d.conan_ref else project_reference\n self._out.writeln(\" %s\" % str(ref), Color.BRIGHT_YELLOW)\n\n if show(\"requires\"):\n depends = deps_graph.neighbors(node)\n if depends:\n self._out.writeln(\" Requires:\", Color.BRIGHT_GREEN)\n for d in depends:\n self._out.writeln(\" %s\" % repr(d.conan_ref), Color.BRIGHT_YELLOW)\n\n def print_search_recipes(self, references, pattern, raw):\n \"\"\" Print all the exported conans information\n param pattern: wildcards, e.g., \"opencv/*\"\n \"\"\"\n if not references and not raw:\n warn_msg = \"There are no packages\"\n pattern_msg = \" matching the '%s' pattern\" % pattern\n self._out.info(warn_msg + pattern_msg if pattern else warn_msg)\n return\n\n if not raw:\n self._out.info(\"Existing package recipes:\\n\")\n if isinstance(references, dict):\n for remote, refs in references.items():\n self._out.highlight(\"Remote '%s':\" % str(remote))\n for conan_ref in sorted(refs):\n self._print_colored_line(str(conan_ref), indent=0)\n else:\n for conan_ref in sorted(references):\n self._print_colored_line(str(conan_ref), indent=0)\n else:\n if isinstance(references, dict):\n for remote, refs in references.items():\n self._out.writeln(\"Remote '%s':\" % str(remote))\n for conan_ref in sorted(refs):\n self._out.writeln(str(conan_ref))\n else:\n self._out.writeln(\"\\n\".join([str(ref) for ref in references]))\n\n def print_search_packages(self, packages_props, reference, recipe_hash, packages_query):\n if not packages_props:\n if packages_query:\n warn_msg = \"There are no packages for reference '%s' matching the query '%s'\" % (str(reference),\n packages_query)\n else:\n warn_msg = \"There are no packages for reference '%s'\" % str(reference)\n\n if recipe_hash:\n warn_msg += \", but package recipe found.\"\n\n self._out.info(warn_msg)\n return\n\n self._out.info(\"Existing packages for recipe %s:\\n\" % str(reference))\n # Each package\n for package_id, properties in sorted(packages_props.items()):\n self._print_colored_line(\"Package_ID\", package_id, 1)\n for section in (\"options\", \"settings\", \"full_requires\"):\n attrs = properties.get(section, [])\n if attrs:\n section_name = {\"full_requires\": \"requires\"}.get(section, section)\n self._print_colored_line(\"[%s]\" % section_name, indent=2)\n if isinstance(attrs, dict): # options, settings\n attrs = OrderedDict(sorted(attrs.items()))\n for key, value in attrs.items():\n self._print_colored_line(key, value=value, indent=3)\n elif isinstance(attrs, list): # full requires\n for key in sorted(attrs):\n self._print_colored_line(key, indent=3)\n package_recipe_hash = properties.get(\"recipe_hash\", None)\n # Always compare outdated with local recipe, simplification,\n # if a remote check is needed install recipe first\n if recipe_hash:\n self._print_colored_line(\"Outdated from recipe: %s\" % (recipe_hash != package_recipe_hash), indent=2)\n self._out.writeln(\"\")\n\n def print_profile(self, name, profile):\n 
self._out.info(\"Configuration for profile %s:\\n\" % name)\n self._print_profile_section(\"settings\", profile.settings.items(), separator=\"=\")\n self._print_profile_section(\"options\", profile.options.as_list(), separator=\"=\")\n self._print_profile_section(\"build_requires\", [(key, \", \".join(str(val) for val in values))\n for key, values in\n profile.build_requires.items()])\n\n envs = []\n for package, env_vars in profile.env_values.data.items():\n for name, value in env_vars.items():\n key = \"%s:%s\" % (package, name) if package else name\n envs.append((key, value))\n self._print_profile_section(\"env\", envs, separator='=')\n\n def _print_profile_section(self, name, items, indent=0, separator=\": \"):\n self._print_colored_line(\"[%s]\" % name, indent=indent, color=Color.BRIGHT_RED)\n for key, value in items:\n self._print_colored_line(key, value=str(value), indent=0, separator=separator)\n\n def _print_colored_line(self, text, value=None, indent=0, separator=\": \", color=None):\n \"\"\" Print a colored line depending on its indentation level\n Attributes:\n text: string line\n split_symbol: if you want an output with different in-line colors\n indent_plus: integer to add a plus indentation\n \"\"\"\n text = text.strip()\n if not text:\n return\n\n text_color = Printer.INDENT_COLOR.get(indent, Color.BRIGHT_WHITE) if not color else color\n indent_text = ' ' * Printer.INDENT_SPACES * indent\n if value is not None:\n value_color = Color.BRIGHT_WHITE\n self._out.write('%s%s%s' % (indent_text, text, separator), text_color)\n self._out.writeln(value, value_color)\n else:\n self._out.writeln('%s%s' % (indent_text, text), text_color)\n", "path": "conans/client/printer.py"}]}
| 4,054 | 153 |
gh_patches_debug_11649
|
rasdani/github-patches
|
git_diff
|
pypa__cibuildwheel-129
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MacOS travis-ci build stalls
I noticed today that our MacOS travis-CI build using cibuildwheel has started stalling at the following point of the cibuildwheel setup:
```bash
+ pip install --upgrade setuptools
Collecting setuptools
Downloading https://files.pythonhosted.org/packages/37/06/754589caf971b0d2d48f151c2586f62902d93dc908e2fd9b9b9f6aa3c9dd/setuptools-40.6.3-py2.py3-none-any.whl (573kB)
Installing collected packages: setuptools
Found existing installation: setuptools 28.8.0
Uninstalling setuptools-28.8.0:
No output has been received in the last 10m0s, this potentially indicates a stalled build or something wrong with the build itself.
Check the details on how to adjust your build configuration on: https://docs.travis-ci.com/user/common-build-problems/#Build-times-out-because-no-output-was-received
The build has been terminated
```
This hasn't affected our Windows/Linux builds, and no changes to our devops pipeline has occurred (and even then, only superficial Python commits in our codebase were committed).
This issue happens no matter how many times we restart the build, and seems odd - this step is usually instantaneous on previous Mac cibuildwheel builds.
Since this is a command that is called by cibuildwheel, has there been a recent change that breaks this step?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cibuildwheel/macos.py`
Content:
```
1 from __future__ import print_function
2 import os, subprocess, shlex, sys, shutil
3 from collections import namedtuple
4 from glob import glob
5 try:
6 from shlex import quote as shlex_quote
7 except ImportError:
8 from pipes import quote as shlex_quote
9
10 from .util import prepare_command, get_build_verbosity_extra_flags
11
12
13 def build(project_dir, output_dir, test_command, test_requires, before_build, build_verbosity, build_selector, environment):
14 PythonConfiguration = namedtuple('PythonConfiguration', ['version', 'identifier', 'url'])
15 python_configurations = [
16 PythonConfiguration(version='2.7', identifier='cp27-macosx_10_6_intel', url='https://www.python.org/ftp/python/2.7.15/python-2.7.15-macosx10.6.pkg'),
17 PythonConfiguration(version='3.4', identifier='cp34-macosx_10_6_intel', url='https://www.python.org/ftp/python/3.4.4/python-3.4.4-macosx10.6.pkg'),
18 PythonConfiguration(version='3.5', identifier='cp35-macosx_10_6_intel', url='https://www.python.org/ftp/python/3.5.4/python-3.5.4-macosx10.6.pkg'),
19 PythonConfiguration(version='3.6', identifier='cp36-macosx_10_6_intel', url='https://www.python.org/ftp/python/3.6.8/python-3.6.8-macosx10.6.pkg'),
20 PythonConfiguration(version='3.7', identifier='cp37-macosx_10_6_intel', url='https://www.python.org/ftp/python/3.7.2/python-3.7.2-macosx10.6.pkg'),
21 ]
22 get_pip_url = 'https://bootstrap.pypa.io/get-pip.py'
23 get_pip_script = '/tmp/get-pip.py'
24
25 pkgs_output = subprocess.check_output(['pkgutil', '--pkgs'])
26 if sys.version_info[0] >= 3:
27 pkgs_output = pkgs_output.decode('utf8')
28 installed_system_packages = pkgs_output.splitlines()
29
30 def call(args, env=None, cwd=None, shell=False):
31 # print the command executing for the logs
32 if shell:
33 print('+ %s' % args)
34 else:
35 print('+ ' + ' '.join(shlex_quote(a) for a in args))
36
37 return subprocess.check_call(args, env=env, cwd=cwd, shell=shell)
38
39 abs_project_dir = os.path.abspath(project_dir)
40
41 # get latest pip once and for all
42 call(['curl', '-L', '-o', get_pip_script, get_pip_url])
43
44 for config in python_configurations:
45 if not build_selector(config.identifier):
46 print('cibuildwheel: Skipping build %s' % config.identifier, file=sys.stderr)
47 continue
48
49 # if this version of python isn't installed, get it from python.org and install
50 python_package_identifier = 'org.python.Python.PythonFramework-%s' % config.version
51 if python_package_identifier not in installed_system_packages:
52 # download the pkg
53 call(['curl', '-L', '-o', '/tmp/Python.pkg', config.url])
54 # install
55 call(['sudo', 'installer', '-pkg', '/tmp/Python.pkg', '-target', '/'])
56 # patch open ssl
57 if config.version in ('3.4', '3.5'):
58 call(['curl', '-fsSLo', '/tmp/python-patch.tar.gz', 'https://github.com/mayeut/patch-macos-python-openssl/releases/download/v1.0.2q/patch-macos-python-%s-openssl-v1.0.2q.tar.gz' % config.version])
59 call(['sudo', 'tar', '-C', '/Library/Frameworks/Python.framework/Versions/%s/' % config.version, '-xmf', '/tmp/python-patch.tar.gz'])
60
61 installation_bin_path = '/Library/Frameworks/Python.framework/Versions/{}/bin'.format(config.version)
62
63 # Python bin folders on Mac don't symlink python3 to python, so we do that
64 # so `python` and `pip` always point to the active configuration.
65 if os.path.exists('/tmp/cibw_bin'):
66 shutil.rmtree('/tmp/cibw_bin')
67 os.makedirs('/tmp/cibw_bin')
68
69 if config.version[0] == '3':
70 os.symlink(os.path.join(installation_bin_path, 'python3'), '/tmp/cibw_bin/python')
71 os.symlink(os.path.join(installation_bin_path, 'python3-config'), '/tmp/cibw_bin/python-config')
72 os.symlink(os.path.join(installation_bin_path, 'pip3'), '/tmp/cibw_bin/pip')
73
74 env = os.environ.copy()
75 env['PATH'] = os.pathsep.join([
76 '/tmp/cibw_bin',
77 installation_bin_path,
78 env['PATH'],
79 ])
80 env = environment.as_dictionary(prev_environment=env)
81
82 # check what version we're on
83 call(['which', 'python'], env=env)
84 call(['python', '--version'], env=env)
85
86 # install pip & wheel
87 call(['python', get_pip_script, '--no-setuptools', '--no-wheel'], env=env)
88 call(['pip', '--version'], env=env)
89 # sudo required, because the removal of the old version of setuptools might cause problems with newer pip versions (see issue #122)
90 call(['sudo', 'pip', 'install', '--upgrade', 'setuptools'], env=env)
91 call(['pip', 'install', 'wheel'], env=env)
92 call(['pip', 'install', 'delocate'], env=env)
93
94 # setup dirs
95 if os.path.exists('/tmp/built_wheel'):
96 shutil.rmtree('/tmp/built_wheel')
97 os.makedirs('/tmp/built_wheel')
98 if os.path.exists('/tmp/delocated_wheel'):
99 shutil.rmtree('/tmp/delocated_wheel')
100 os.makedirs('/tmp/delocated_wheel')
101
102 # run the before_build command
103 if before_build:
104 before_build_prepared = prepare_command(before_build, project=abs_project_dir)
105 call(before_build_prepared, env=env, shell=True)
106
107 # build the wheel
108 call(['pip', 'wheel', abs_project_dir, '-w', '/tmp/built_wheel', '--no-deps'] + get_build_verbosity_extra_flags(build_verbosity), env=env)
109 built_wheel = glob('/tmp/built_wheel/*.whl')[0]
110
111 if built_wheel.endswith('none-any.whl'):
112 # pure python wheel - just move
113 shutil.move(built_wheel, '/tmp/delocated_wheel')
114 else:
115 # list the dependencies
116 call(['delocate-listdeps', built_wheel], env=env)
117 # rebuild the wheel with shared libraries included and place in output dir
118 call(['delocate-wheel', '-w', '/tmp/delocated_wheel', built_wheel], env=env)
119 delocated_wheel = glob('/tmp/delocated_wheel/*.whl')[0]
120
121 # install the wheel
122 call(['pip', 'install', delocated_wheel], env=env)
123
124 # test the wheel
125 if test_requires:
126 call(['pip', 'install'] + test_requires, env=env)
127 if test_command:
128 # run the tests from $HOME, with an absolute path in the command
129 # (this ensures that Python runs the tests against the installed wheel
130 # and not the repo code)
131 test_command_prepared = prepare_command(test_command, project=abs_project_dir)
132 call(test_command_prepared, cwd=os.environ['HOME'], env=env, shell=True)
133
134 # we're all done here; move it to output (overwrite existing)
135 dst = os.path.join(output_dir, os.path.basename(delocated_wheel))
136 shutil.move(delocated_wheel, dst)
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cibuildwheel/macos.py b/cibuildwheel/macos.py
--- a/cibuildwheel/macos.py
+++ b/cibuildwheel/macos.py
@@ -86,8 +86,7 @@
# install pip & wheel
call(['python', get_pip_script, '--no-setuptools', '--no-wheel'], env=env)
call(['pip', '--version'], env=env)
- # sudo required, because the removal of the old version of setuptools might cause problems with newer pip versions (see issue #122)
- call(['sudo', 'pip', 'install', '--upgrade', 'setuptools'], env=env)
+ call(['pip', 'install', '--upgrade', 'setuptools'], env=env)
call(['pip', 'install', 'wheel'], env=env)
call(['pip', 'install', 'delocate'], env=env)
|
{"golden_diff": "diff --git a/cibuildwheel/macos.py b/cibuildwheel/macos.py\n--- a/cibuildwheel/macos.py\n+++ b/cibuildwheel/macos.py\n@@ -86,8 +86,7 @@\n # install pip & wheel\n call(['python', get_pip_script, '--no-setuptools', '--no-wheel'], env=env)\n call(['pip', '--version'], env=env)\n- # sudo required, because the removal of the old version of setuptools might cause problems with newer pip versions (see issue #122)\n- call(['sudo', 'pip', 'install', '--upgrade', 'setuptools'], env=env)\n+ call(['pip', 'install', '--upgrade', 'setuptools'], env=env)\n call(['pip', 'install', 'wheel'], env=env)\n call(['pip', 'install', 'delocate'], env=env)\n", "issue": "MacOS travis-ci build stalls\nI noticed today that our MacOS travis-CI build using cibuildwheel has started stalling at the following point of the cibuildwheel setup:\r\n\r\n```bash\r\n+ pip install --upgrade setuptools\r\nCollecting setuptools\r\n Downloading https://files.pythonhosted.org/packages/37/06/754589caf971b0d2d48f151c2586f62902d93dc908e2fd9b9b9f6aa3c9dd/setuptools-40.6.3-py2.py3-none-any.whl (573kB)\r\nInstalling collected packages: setuptools\r\n Found existing installation: setuptools 28.8.0\r\n Uninstalling setuptools-28.8.0:\r\n\r\nNo output has been received in the last 10m0s, this potentially indicates a stalled build or something wrong with the build itself.\r\nCheck the details on how to adjust your build configuration on: https://docs.travis-ci.com/user/common-build-problems/#Build-times-out-because-no-output-was-received\r\nThe build has been terminated\r\n```\r\n\r\nThis hasn't affected our Windows/Linux builds, and no changes to our devops pipeline has occurred (and even then, only superficial Python commits in our codebase were committed). \r\n\r\nThis issue happens no matter how many times we restart the build, and seems odd - this step is usually instantaneous on previous Mac cibuildwheel builds.\r\n\r\nSince this is a command that is called by cibuildwheel, has there been a recent change that breaks this step?\n", "before_files": [{"content": "from __future__ import print_function\nimport os, subprocess, shlex, sys, shutil\nfrom collections import namedtuple\nfrom glob import glob\ntry:\n from shlex import quote as shlex_quote\nexcept ImportError:\n from pipes import quote as shlex_quote\n\nfrom .util import prepare_command, get_build_verbosity_extra_flags\n\n\ndef build(project_dir, output_dir, test_command, test_requires, before_build, build_verbosity, build_selector, environment):\n PythonConfiguration = namedtuple('PythonConfiguration', ['version', 'identifier', 'url'])\n python_configurations = [\n PythonConfiguration(version='2.7', identifier='cp27-macosx_10_6_intel', url='https://www.python.org/ftp/python/2.7.15/python-2.7.15-macosx10.6.pkg'),\n PythonConfiguration(version='3.4', identifier='cp34-macosx_10_6_intel', url='https://www.python.org/ftp/python/3.4.4/python-3.4.4-macosx10.6.pkg'),\n PythonConfiguration(version='3.5', identifier='cp35-macosx_10_6_intel', url='https://www.python.org/ftp/python/3.5.4/python-3.5.4-macosx10.6.pkg'),\n PythonConfiguration(version='3.6', identifier='cp36-macosx_10_6_intel', url='https://www.python.org/ftp/python/3.6.8/python-3.6.8-macosx10.6.pkg'),\n PythonConfiguration(version='3.7', identifier='cp37-macosx_10_6_intel', url='https://www.python.org/ftp/python/3.7.2/python-3.7.2-macosx10.6.pkg'),\n ]\n get_pip_url = 'https://bootstrap.pypa.io/get-pip.py'\n get_pip_script = '/tmp/get-pip.py'\n\n pkgs_output = subprocess.check_output(['pkgutil', '--pkgs'])\n if 
sys.version_info[0] >= 3:\n pkgs_output = pkgs_output.decode('utf8')\n installed_system_packages = pkgs_output.splitlines()\n\n def call(args, env=None, cwd=None, shell=False):\n # print the command executing for the logs\n if shell:\n print('+ %s' % args)\n else:\n print('+ ' + ' '.join(shlex_quote(a) for a in args))\n\n return subprocess.check_call(args, env=env, cwd=cwd, shell=shell)\n\n abs_project_dir = os.path.abspath(project_dir)\n\n # get latest pip once and for all\n call(['curl', '-L', '-o', get_pip_script, get_pip_url])\n\n for config in python_configurations:\n if not build_selector(config.identifier):\n print('cibuildwheel: Skipping build %s' % config.identifier, file=sys.stderr)\n continue\n\n # if this version of python isn't installed, get it from python.org and install\n python_package_identifier = 'org.python.Python.PythonFramework-%s' % config.version\n if python_package_identifier not in installed_system_packages:\n # download the pkg\n call(['curl', '-L', '-o', '/tmp/Python.pkg', config.url])\n # install\n call(['sudo', 'installer', '-pkg', '/tmp/Python.pkg', '-target', '/'])\n # patch open ssl\n if config.version in ('3.4', '3.5'):\n call(['curl', '-fsSLo', '/tmp/python-patch.tar.gz', 'https://github.com/mayeut/patch-macos-python-openssl/releases/download/v1.0.2q/patch-macos-python-%s-openssl-v1.0.2q.tar.gz' % config.version])\n call(['sudo', 'tar', '-C', '/Library/Frameworks/Python.framework/Versions/%s/' % config.version, '-xmf', '/tmp/python-patch.tar.gz'])\n\n installation_bin_path = '/Library/Frameworks/Python.framework/Versions/{}/bin'.format(config.version)\n\n # Python bin folders on Mac don't symlink python3 to python, so we do that\n # so `python` and `pip` always point to the active configuration.\n if os.path.exists('/tmp/cibw_bin'):\n shutil.rmtree('/tmp/cibw_bin')\n os.makedirs('/tmp/cibw_bin')\n\n if config.version[0] == '3':\n os.symlink(os.path.join(installation_bin_path, 'python3'), '/tmp/cibw_bin/python')\n os.symlink(os.path.join(installation_bin_path, 'python3-config'), '/tmp/cibw_bin/python-config')\n os.symlink(os.path.join(installation_bin_path, 'pip3'), '/tmp/cibw_bin/pip')\n\n env = os.environ.copy()\n env['PATH'] = os.pathsep.join([\n '/tmp/cibw_bin',\n installation_bin_path,\n env['PATH'],\n ])\n env = environment.as_dictionary(prev_environment=env)\n\n # check what version we're on\n call(['which', 'python'], env=env)\n call(['python', '--version'], env=env)\n\n # install pip & wheel\n call(['python', get_pip_script, '--no-setuptools', '--no-wheel'], env=env)\n call(['pip', '--version'], env=env)\n # sudo required, because the removal of the old version of setuptools might cause problems with newer pip versions (see issue #122)\n call(['sudo', 'pip', 'install', '--upgrade', 'setuptools'], env=env)\n call(['pip', 'install', 'wheel'], env=env)\n call(['pip', 'install', 'delocate'], env=env)\n\n # setup dirs\n if os.path.exists('/tmp/built_wheel'):\n shutil.rmtree('/tmp/built_wheel')\n os.makedirs('/tmp/built_wheel')\n if os.path.exists('/tmp/delocated_wheel'):\n shutil.rmtree('/tmp/delocated_wheel')\n os.makedirs('/tmp/delocated_wheel')\n\n # run the before_build command\n if before_build:\n before_build_prepared = prepare_command(before_build, project=abs_project_dir)\n call(before_build_prepared, env=env, shell=True)\n\n # build the wheel\n call(['pip', 'wheel', abs_project_dir, '-w', '/tmp/built_wheel', '--no-deps'] + get_build_verbosity_extra_flags(build_verbosity), env=env)\n built_wheel = glob('/tmp/built_wheel/*.whl')[0]\n\n if 
built_wheel.endswith('none-any.whl'):\n # pure python wheel - just move\n shutil.move(built_wheel, '/tmp/delocated_wheel')\n else:\n # list the dependencies\n call(['delocate-listdeps', built_wheel], env=env)\n # rebuild the wheel with shared libraries included and place in output dir\n call(['delocate-wheel', '-w', '/tmp/delocated_wheel', built_wheel], env=env)\n delocated_wheel = glob('/tmp/delocated_wheel/*.whl')[0]\n\n # install the wheel\n call(['pip', 'install', delocated_wheel], env=env)\n\n # test the wheel\n if test_requires:\n call(['pip', 'install'] + test_requires, env=env)\n if test_command:\n # run the tests from $HOME, with an absolute path in the command\n # (this ensures that Python runs the tests against the installed wheel\n # and not the repo code)\n test_command_prepared = prepare_command(test_command, project=abs_project_dir)\n call(test_command_prepared, cwd=os.environ['HOME'], env=env, shell=True)\n\n # we're all done here; move it to output (overwrite existing)\n dst = os.path.join(output_dir, os.path.basename(delocated_wheel))\n shutil.move(delocated_wheel, dst)\n", "path": "cibuildwheel/macos.py"}], "after_files": [{"content": "from __future__ import print_function\nimport os, subprocess, shlex, sys, shutil\nfrom collections import namedtuple\nfrom glob import glob\ntry:\n from shlex import quote as shlex_quote\nexcept ImportError:\n from pipes import quote as shlex_quote\n\nfrom .util import prepare_command, get_build_verbosity_extra_flags\n\n\ndef build(project_dir, output_dir, test_command, test_requires, before_build, build_verbosity, build_selector, environment):\n PythonConfiguration = namedtuple('PythonConfiguration', ['version', 'identifier', 'url'])\n python_configurations = [\n PythonConfiguration(version='2.7', identifier='cp27-macosx_10_6_intel', url='https://www.python.org/ftp/python/2.7.15/python-2.7.15-macosx10.6.pkg'),\n PythonConfiguration(version='3.4', identifier='cp34-macosx_10_6_intel', url='https://www.python.org/ftp/python/3.4.4/python-3.4.4-macosx10.6.pkg'),\n PythonConfiguration(version='3.5', identifier='cp35-macosx_10_6_intel', url='https://www.python.org/ftp/python/3.5.4/python-3.5.4-macosx10.6.pkg'),\n PythonConfiguration(version='3.6', identifier='cp36-macosx_10_6_intel', url='https://www.python.org/ftp/python/3.6.8/python-3.6.8-macosx10.6.pkg'),\n PythonConfiguration(version='3.7', identifier='cp37-macosx_10_6_intel', url='https://www.python.org/ftp/python/3.7.2/python-3.7.2-macosx10.6.pkg'),\n ]\n get_pip_url = 'https://bootstrap.pypa.io/get-pip.py'\n get_pip_script = '/tmp/get-pip.py'\n\n pkgs_output = subprocess.check_output(['pkgutil', '--pkgs'])\n if sys.version_info[0] >= 3:\n pkgs_output = pkgs_output.decode('utf8')\n installed_system_packages = pkgs_output.splitlines()\n\n def call(args, env=None, cwd=None, shell=False):\n # print the command executing for the logs\n if shell:\n print('+ %s' % args)\n else:\n print('+ ' + ' '.join(shlex_quote(a) for a in args))\n\n return subprocess.check_call(args, env=env, cwd=cwd, shell=shell)\n\n abs_project_dir = os.path.abspath(project_dir)\n\n # get latest pip once and for all\n call(['curl', '-L', '-o', get_pip_script, get_pip_url])\n\n for config in python_configurations:\n if not build_selector(config.identifier):\n print('cibuildwheel: Skipping build %s' % config.identifier, file=sys.stderr)\n continue\n\n # if this version of python isn't installed, get it from python.org and install\n python_package_identifier = 'org.python.Python.PythonFramework-%s' % config.version\n if 
python_package_identifier not in installed_system_packages:\n # download the pkg\n call(['curl', '-L', '-o', '/tmp/Python.pkg', config.url])\n # install\n call(['sudo', 'installer', '-pkg', '/tmp/Python.pkg', '-target', '/'])\n # patch open ssl\n if config.version in ('3.4', '3.5'):\n call(['curl', '-fsSLo', '/tmp/python-patch.tar.gz', 'https://github.com/mayeut/patch-macos-python-openssl/releases/download/v1.0.2q/patch-macos-python-%s-openssl-v1.0.2q.tar.gz' % config.version])\n call(['sudo', 'tar', '-C', '/Library/Frameworks/Python.framework/Versions/%s/' % config.version, '-xmf', '/tmp/python-patch.tar.gz'])\n\n installation_bin_path = '/Library/Frameworks/Python.framework/Versions/{}/bin'.format(config.version)\n\n # Python bin folders on Mac don't symlink python3 to python, so we do that\n # so `python` and `pip` always point to the active configuration.\n if os.path.exists('/tmp/cibw_bin'):\n shutil.rmtree('/tmp/cibw_bin')\n os.makedirs('/tmp/cibw_bin')\n\n if config.version[0] == '3':\n os.symlink(os.path.join(installation_bin_path, 'python3'), '/tmp/cibw_bin/python')\n os.symlink(os.path.join(installation_bin_path, 'python3-config'), '/tmp/cibw_bin/python-config')\n os.symlink(os.path.join(installation_bin_path, 'pip3'), '/tmp/cibw_bin/pip')\n\n env = os.environ.copy()\n env['PATH'] = os.pathsep.join([\n '/tmp/cibw_bin',\n installation_bin_path,\n env['PATH'],\n ])\n env = environment.as_dictionary(prev_environment=env)\n\n # check what version we're on\n call(['which', 'python'], env=env)\n call(['python', '--version'], env=env)\n\n # install pip & wheel\n call(['python', get_pip_script, '--no-setuptools', '--no-wheel'], env=env)\n call(['pip', '--version'], env=env)\n call(['pip', 'install', '--upgrade', 'setuptools'], env=env)\n call(['pip', 'install', 'wheel'], env=env)\n call(['pip', 'install', 'delocate'], env=env)\n\n # setup dirs\n if os.path.exists('/tmp/built_wheel'):\n shutil.rmtree('/tmp/built_wheel')\n os.makedirs('/tmp/built_wheel')\n if os.path.exists('/tmp/delocated_wheel'):\n shutil.rmtree('/tmp/delocated_wheel')\n os.makedirs('/tmp/delocated_wheel')\n\n # run the before_build command\n if before_build:\n before_build_prepared = prepare_command(before_build, project=abs_project_dir)\n call(before_build_prepared, env=env, shell=True)\n\n # build the wheel\n call(['pip', 'wheel', abs_project_dir, '-w', '/tmp/built_wheel', '--no-deps'] + get_build_verbosity_extra_flags(build_verbosity), env=env)\n built_wheel = glob('/tmp/built_wheel/*.whl')[0]\n\n if built_wheel.endswith('none-any.whl'):\n # pure python wheel - just move\n shutil.move(built_wheel, '/tmp/delocated_wheel')\n else:\n # list the dependencies\n call(['delocate-listdeps', built_wheel], env=env)\n # rebuild the wheel with shared libraries included and place in output dir\n call(['delocate-wheel', '-w', '/tmp/delocated_wheel', built_wheel], env=env)\n delocated_wheel = glob('/tmp/delocated_wheel/*.whl')[0]\n\n # install the wheel\n call(['pip', 'install', delocated_wheel], env=env)\n\n # test the wheel\n if test_requires:\n call(['pip', 'install'] + test_requires, env=env)\n if test_command:\n # run the tests from $HOME, with an absolute path in the command\n # (this ensures that Python runs the tests against the installed wheel\n # and not the repo code)\n test_command_prepared = prepare_command(test_command, project=abs_project_dir)\n call(test_command_prepared, cwd=os.environ['HOME'], env=env, shell=True)\n\n # we're all done here; move it to output (overwrite existing)\n dst = os.path.join(output_dir, 
os.path.basename(delocated_wheel))\n shutil.move(delocated_wheel, dst)\n", "path": "cibuildwheel/macos.py"}]}
| 2,671 | 198 |
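The macOS build loop captured in the record above drives `pip wheel` and the `delocate` tools through a thin `subprocess.check_call` wrapper. As a rough standalone sketch of just the repair-and-install step (the paths and wheel names are illustrative, not taken from cibuildwheel itself):

```python
import glob
import shutil
import subprocess

built_dir = "/tmp/built_wheel"          # illustrative paths, mirroring the record above
repaired_dir = "/tmp/delocated_wheel"

built_wheel = glob.glob(f"{built_dir}/*.whl")[0]

if built_wheel.endswith("none-any.whl"):
    # Pure-Python wheel: nothing to repair, just move it along.
    shutil.move(built_wheel, repaired_dir)
else:
    # Show the native libraries the wheel links against, then copy them into the wheel.
    subprocess.check_call(["delocate-listdeps", built_wheel])
    subprocess.check_call(["delocate-wheel", "-w", repaired_dir, built_wheel])

repaired_wheel = glob.glob(f"{repaired_dir}/*.whl")[0]
subprocess.check_call(["pip", "install", repaired_wheel])
```

The real code in the record additionally threads an environment dictionary and build-verbosity flags through every call before moving the repaired wheel to the output directory.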
gh_patches_debug_16503
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-1061
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dockerfile scan fails when in directory and used -f
**Describe the bug**
When running a directory scan, checkov reports failed checks for the Dockerfile. When scanning the file directly, no errors are shown.
**To Reproduce**
Create Dockerfile in directory `test` with content:
```
FROM debian:buster
ENV CHECKOV_VERSION 1.0.775
RUN export DEBIAN_FRONTEND=noninteractive && \
apt-get -y update && \
apt-get -y --no-install-recommends install wget unzip ca-certificates git python3 python3-pip python3-setuptools python3-wheel && \
pip3 install -U checkov=="${CHECKOV_VERSION}"
```
`checkov -f test/Dockerfile` won't show errors
`checkov -d test` will show an error
**Expected behavior**
Show error in both cases.
**Screenshots**
<img width="892" alt="Screenshot 2021-04-10 at 09 39 21" src="https://user-images.githubusercontent.com/672767/114262507-a54dde80-99e0-11eb-9e9e-3e3f5d2d2a7f.png">
**Desktop (please complete the following information):**
- OS: MacOS 11.2.3
- Python: 3.9.4
- Checkov Version 2.0.27
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/dockerfile/runner.py`
Content:
```
1 import logging
2 import os
3 from dockerfile_parse.constants import DOCKERFILE_FILENAME
4
5 from checkov.common.output.record import Record
6 from checkov.common.output.report import Report
7 from checkov.common.runners.base_runner import BaseRunner, filter_ignored_directories
8 from checkov.dockerfile.parser import parse, collect_skipped_checks
9 from checkov.dockerfile.registry import registry
10 from checkov.runner_filter import RunnerFilter
11
12 DOCKER_FILE_MASK = [DOCKERFILE_FILENAME]
13
14
15 class Runner(BaseRunner):
16 check_type = "dockerfile"
17
18 def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),
19 collect_skip_comments=True):
20 report = Report(self.check_type)
21 definitions = {}
22 definitions_raw = {}
23 parsing_errors = {}
24 files_list = []
25 if external_checks_dir:
26 for directory in external_checks_dir:
27 registry.load_external_checks(directory)
28
29 if files:
30 for file in files:
31 if file in DOCKER_FILE_MASK:
32 (definitions[file], definitions_raw[file]) = parse(file)
33
34 if root_folder:
35 for root, d_names, f_names in os.walk(root_folder):
36 filter_ignored_directories(d_names)
37 for file in f_names:
38 if file in DOCKER_FILE_MASK:
39 files_list.append(os.path.join(root, file))
40
41 for file in files_list:
42 relative_file_path = f'/{os.path.relpath(file, os.path.commonprefix((root_folder, file)))}'
43 try:
44 (definitions[relative_file_path], definitions_raw[relative_file_path]) = parse(file)
45 except TypeError:
46 logging.info(f'Dockerfile skipping {file} as it is not a valid dockerfile template')
47
48 for docker_file_path in definitions.keys():
49
50 # There are a few cases here. If -f was used, there could be a leading / because it's an absolute path,
51 # or there will be no leading slash; root_folder will always be none.
52 # If -d is used, root_folder will be the value given, and -f will start with a / (hardcoded above).
53 # The goal here is simply to get a valid path to the file (which docker_file_path does not always give).
54 if docker_file_path[0] == '/':
55 path_to_convert = (root_folder + docker_file_path) if root_folder else docker_file_path
56 else:
57 path_to_convert = (os.path.join(root_folder, docker_file_path)) if root_folder else docker_file_path
58
59 file_abs_path = os.path.abspath(path_to_convert)
60 skipped_checks = collect_skipped_checks(definitions[docker_file_path])
61 instructions = definitions[docker_file_path]
62
63 results = registry.scan(docker_file_path, instructions, skipped_checks,
64 runner_filter)
65 for check, check_result in results.items():
66 result_configuration = check_result['results_configuration']
67 startline = 0
68 endline = 0
69 result_instruction = ""
70 if result_configuration:
71 startline = result_configuration['startline']
72 endline = result_configuration['endline']
73 result_instruction = result_configuration["instruction"]
74
75 codeblock = []
76 self.calc_record_codeblock(codeblock, definitions_raw, docker_file_path, endline, startline)
77 record = Record(check_id=check.id, check_name=check.name, check_result=check_result,
78 code_block=codeblock,
79 file_path=docker_file_path,
80 file_line_range=[startline,
81 endline],
82 resource="{}.{}".format(docker_file_path,
83 result_instruction,
84 startline),
85 evaluations=None, check_class=check.__class__.__module__,
86 file_abs_path=file_abs_path, entity_tags=None)
87 report.add_record(record=record)
88
89 return report
90
91
92 def calc_record_codeblock(self, codeblock, definitions_raw, docker_file_path, endline, startline):
93 for line in range(startline, endline + 1):
94 codeblock.append((line, definitions_raw[docker_file_path][line]))
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/dockerfile/runner.py b/checkov/dockerfile/runner.py
--- a/checkov/dockerfile/runner.py
+++ b/checkov/dockerfile/runner.py
@@ -15,7 +15,7 @@
class Runner(BaseRunner):
check_type = "dockerfile"
- def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),
+ def run(self, root_folder=None, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),
collect_skip_comments=True):
report = Report(self.check_type)
definitions = {}
@@ -28,7 +28,7 @@
if files:
for file in files:
- if file in DOCKER_FILE_MASK:
+ if os.path.basename(file) in DOCKER_FILE_MASK:
(definitions[file], definitions_raw[file]) = parse(file)
if root_folder:
|
{"golden_diff": "diff --git a/checkov/dockerfile/runner.py b/checkov/dockerfile/runner.py\n--- a/checkov/dockerfile/runner.py\n+++ b/checkov/dockerfile/runner.py\n@@ -15,7 +15,7 @@\n class Runner(BaseRunner):\n check_type = \"dockerfile\"\n \n- def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),\n+ def run(self, root_folder=None, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),\n collect_skip_comments=True):\n report = Report(self.check_type)\n definitions = {}\n@@ -28,7 +28,7 @@\n \n if files:\n for file in files:\n- if file in DOCKER_FILE_MASK:\n+ if os.path.basename(file) in DOCKER_FILE_MASK:\n (definitions[file], definitions_raw[file]) = parse(file)\n \n if root_folder:\n", "issue": "Dockerfile scan fails when in directory and used -f\n**Describe the bug**\r\nWhen running directory scan checkov shows Dockerfile failed checks. When scanning file no errors are shown.\r\n\r\n**To Reproduce**\r\nCreate Dockerfile in directory `test` with content:\r\n```\r\nFROM debian:buster\r\n\r\nENV CHECKOV_VERSION 1.0.775\r\n\r\nRUN export DEBIAN_FRONTEND=noninteractive && \\\r\n apt-get -y update && \\\r\n apt-get -y --no-install-recommends install wget unzip ca-certificates git python3 python3-pip python3-setuptools python3-wheel && \\\r\n pip3 install -U checkov==\"${CHECKOV_VERSION}\"\r\n```\r\n\r\n`checkov -f test/Dockerfile` won't show errors\r\n`checkov -d test` will show error\r\n\r\n**Expected behavior**\r\nShow error in both cases.\r\n\r\n**Screenshots**\r\n<img width=\"892\" alt=\"Screenshot 2021-04-10 at 09 39 21\" src=\"https://user-images.githubusercontent.com/672767/114262507-a54dde80-99e0-11eb-9e9e-3e3f5d2d2a7f.png\">\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: MacOS 11.2.3\r\n - Python: 3.9.4\r\n - Checkov Version 2.0.27\r\n\n", "before_files": [{"content": "import logging\nimport os\nfrom dockerfile_parse.constants import DOCKERFILE_FILENAME\n\nfrom checkov.common.output.record import Record\nfrom checkov.common.output.report import Report\nfrom checkov.common.runners.base_runner import BaseRunner, filter_ignored_directories\nfrom checkov.dockerfile.parser import parse, collect_skipped_checks\nfrom checkov.dockerfile.registry import registry\nfrom checkov.runner_filter import RunnerFilter\n\nDOCKER_FILE_MASK = [DOCKERFILE_FILENAME]\n\n\nclass Runner(BaseRunner):\n check_type = \"dockerfile\"\n\n def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),\n collect_skip_comments=True):\n report = Report(self.check_type)\n definitions = {}\n definitions_raw = {}\n parsing_errors = {}\n files_list = []\n if external_checks_dir:\n for directory in external_checks_dir:\n registry.load_external_checks(directory)\n\n if files:\n for file in files:\n if file in DOCKER_FILE_MASK:\n (definitions[file], definitions_raw[file]) = parse(file)\n\n if root_folder:\n for root, d_names, f_names in os.walk(root_folder):\n filter_ignored_directories(d_names)\n for file in f_names:\n if file in DOCKER_FILE_MASK:\n files_list.append(os.path.join(root, file))\n\n for file in files_list:\n relative_file_path = f'/{os.path.relpath(file, os.path.commonprefix((root_folder, file)))}'\n try:\n (definitions[relative_file_path], definitions_raw[relative_file_path]) = parse(file)\n except TypeError:\n logging.info(f'Dockerfile skipping {file} as it is not a valid dockerfile template')\n\n for docker_file_path in definitions.keys():\n\n # There are a few cases here. 
If -f was used, there could be a leading / because it's an absolute path,\n # or there will be no leading slash; root_folder will always be none.\n # If -d is used, root_folder will be the value given, and -f will start with a / (hardcoded above).\n # The goal here is simply to get a valid path to the file (which docker_file_path does not always give).\n if docker_file_path[0] == '/':\n path_to_convert = (root_folder + docker_file_path) if root_folder else docker_file_path\n else:\n path_to_convert = (os.path.join(root_folder, docker_file_path)) if root_folder else docker_file_path\n\n file_abs_path = os.path.abspath(path_to_convert)\n skipped_checks = collect_skipped_checks(definitions[docker_file_path])\n instructions = definitions[docker_file_path]\n\n results = registry.scan(docker_file_path, instructions, skipped_checks,\n runner_filter)\n for check, check_result in results.items():\n result_configuration = check_result['results_configuration']\n startline = 0\n endline = 0\n result_instruction = \"\"\n if result_configuration:\n startline = result_configuration['startline']\n endline = result_configuration['endline']\n result_instruction = result_configuration[\"instruction\"]\n\n codeblock = []\n self.calc_record_codeblock(codeblock, definitions_raw, docker_file_path, endline, startline)\n record = Record(check_id=check.id, check_name=check.name, check_result=check_result,\n code_block=codeblock,\n file_path=docker_file_path,\n file_line_range=[startline,\n endline],\n resource=\"{}.{}\".format(docker_file_path,\n result_instruction,\n startline),\n evaluations=None, check_class=check.__class__.__module__,\n file_abs_path=file_abs_path, entity_tags=None)\n report.add_record(record=record)\n\n return report\n\n\n def calc_record_codeblock(self, codeblock, definitions_raw, docker_file_path, endline, startline):\n for line in range(startline, endline + 1):\n codeblock.append((line, definitions_raw[docker_file_path][line]))\n", "path": "checkov/dockerfile/runner.py"}], "after_files": [{"content": "import logging\nimport os\nfrom dockerfile_parse.constants import DOCKERFILE_FILENAME\n\nfrom checkov.common.output.record import Record\nfrom checkov.common.output.report import Report\nfrom checkov.common.runners.base_runner import BaseRunner, filter_ignored_directories\nfrom checkov.dockerfile.parser import parse, collect_skipped_checks\nfrom checkov.dockerfile.registry import registry\nfrom checkov.runner_filter import RunnerFilter\n\nDOCKER_FILE_MASK = [DOCKERFILE_FILENAME]\n\n\nclass Runner(BaseRunner):\n check_type = \"dockerfile\"\n\n def run(self, root_folder=None, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),\n collect_skip_comments=True):\n report = Report(self.check_type)\n definitions = {}\n definitions_raw = {}\n parsing_errors = {}\n files_list = []\n if external_checks_dir:\n for directory in external_checks_dir:\n registry.load_external_checks(directory)\n\n if files:\n for file in files:\n if os.path.basename(file) in DOCKER_FILE_MASK:\n (definitions[file], definitions_raw[file]) = parse(file)\n\n if root_folder:\n for root, d_names, f_names in os.walk(root_folder):\n filter_ignored_directories(d_names)\n for file in f_names:\n if file in DOCKER_FILE_MASK:\n files_list.append(os.path.join(root, file))\n\n for file in files_list:\n relative_file_path = f'/{os.path.relpath(file, os.path.commonprefix((root_folder, file)))}'\n try:\n (definitions[relative_file_path], definitions_raw[relative_file_path]) = parse(file)\n except TypeError:\n 
logging.info(f'Dockerfile skipping {file} as it is not a valid dockerfile template')\n\n for docker_file_path in definitions.keys():\n\n # There are a few cases here. If -f was used, there could be a leading / because it's an absolute path,\n # or there will be no leading slash; root_folder will always be none.\n # If -d is used, root_folder will be the value given, and -f will start with a / (hardcoded above).\n # The goal here is simply to get a valid path to the file (which docker_file_path does not always give).\n if docker_file_path[0] == '/':\n path_to_convert = (root_folder + docker_file_path) if root_folder else docker_file_path\n else:\n path_to_convert = (os.path.join(root_folder, docker_file_path)) if root_folder else docker_file_path\n\n file_abs_path = os.path.abspath(path_to_convert)\n skipped_checks = collect_skipped_checks(definitions[docker_file_path])\n instructions = definitions[docker_file_path]\n\n results = registry.scan(docker_file_path, instructions, skipped_checks,\n runner_filter)\n for check, check_result in results.items():\n result_configuration = check_result['results_configuration']\n startline = 0\n endline = 0\n result_instruction = \"\"\n if result_configuration:\n startline = result_configuration['startline']\n endline = result_configuration['endline']\n result_instruction = result_configuration[\"instruction\"]\n\n codeblock = []\n self.calc_record_codeblock(codeblock, definitions_raw, docker_file_path, endline, startline)\n record = Record(check_id=check.id, check_name=check.name, check_result=check_result,\n code_block=codeblock,\n file_path=docker_file_path,\n file_line_range=[startline,\n endline],\n resource=\"{}.{}\".format(docker_file_path,\n result_instruction,\n startline),\n evaluations=None, check_class=check.__class__.__module__,\n file_abs_path=file_abs_path, entity_tags=None)\n report.add_record(record=record)\n\n return report\n\n\n def calc_record_codeblock(self, codeblock, definitions_raw, docker_file_path, endline, startline):\n for line in range(startline, endline + 1):\n codeblock.append((line, definitions_raw[docker_file_path][line]))\n", "path": "checkov/dockerfile/runner.py"}]}
| 1,610 | 197 |
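The root cause in the record above is a plain string comparison, so it can be reproduced without running checkov at all; the sketch below only assumes the `DOCKER_FILE_MASK` value visible in the listing:

```python
import os

DOCKER_FILE_MASK = ["Dockerfile"]      # dockerfile_parse.constants.DOCKERFILE_FILENAME

candidate = "test/Dockerfile"          # what `checkov -f test/Dockerfile` passes in

# Buggy check: the full relative path never equals the bare file name.
print(candidate in DOCKER_FILE_MASK)                    # False -> file silently skipped

# Patched check from the golden diff: compare only the base name.
print(os.path.basename(candidate) in DOCKER_FILE_MASK)  # True  -> file gets parsed
```

The second change in the diff, defaulting `root_folder=None`, simply lets the runner be invoked with only a file list.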
gh_patches_debug_22436
|
rasdani/github-patches
|
git_diff
|
common-workflow-language__cwltool-1346
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
drop Python 3.5 support on 2020-09-13
Something to look forward to :-)
https://devguide.python.org/#status-of-python-branches
Branch | Schedule | Status | First release | End-of-life
-- | -- | -- | -- | --
3.5 | PEP 478 | security | 2015-09-13 | 2020-09-13
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2 """Setup for the reference implementation of the CWL standards."""
3 import os
4 import sys
5
6 import setuptools.command.egg_info as egg_info_cmd
7 from setuptools import setup
8
9 SETUP_DIR = os.path.dirname(__file__)
10 README = os.path.join(SETUP_DIR, "README.rst")
11
12 try:
13 import gittaggers
14
15 Tagger = gittaggers.EggInfoFromGit
16 except ImportError:
17 Tagger = egg_info_cmd.egg_info
18
19 NEEDS_PYTEST = {"pytest", "test", "ptr"}.intersection(sys.argv)
20 PYTEST_RUNNER = ["pytest-runner", "pytest-cov"] if NEEDS_PYTEST else []
21 USE_MYPYC = False
22 # To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH
23 if len(sys.argv) > 1 and sys.argv[1] == "--use-mypyc":
24 sys.argv.pop(1)
25 USE_MYPYC = True
26 if os.getenv("CWLTOOL_USE_MYPYC", None) == "1":
27 USE_MYPYC = True
28
29 if USE_MYPYC:
30 mypyc_targets = [
31 "cwltool/argparser.py",
32 "cwltool/builder.py",
33 "cwltool/checker.py",
34 "cwltool/command_line_tool.py",
35 # "cwltool/context.py", # monkeypatching
36 "cwltool/cwlrdf.py",
37 "cwltool/docker_id.py",
38 "cwltool/docker.py",
39 "cwltool/udocker.py",
40 "cwltool/errors.py",
41 "cwltool/executors.py",
42 "cwltool/expression.py",
43 "cwltool/factory.py",
44 "cwltool/flatten.py",
45 # "cwltool/__init__.py",
46 "cwltool/job.py",
47 "cwltool/load_tool.py",
48 # "cwltool/loghandler.py", # so we can monkeypatch the logger from tests
49 # "cwltool/__main__.py",
50 "cwltool/main.py",
51 "cwltool/mutation.py",
52 "cwltool/pack.py",
53 # "cwltool/pathmapper.py", # class PathMapper needs to be subclassable
54 "cwltool/process.py",
55 "cwltool/procgenerator.py",
56 # "cwltool/provenance.py", # WritableBag is having issues
57 "cwltool/resolver.py",
58 # "cwltool/sandboxjs.py", # probably not speed critical, tests need to mock components
59 "cwltool/secrets.py",
60 "cwltool/singularity.py",
61 "cwltool/software_requirements.py",
62 "cwltool/stdfsaccess.py",
63 "cwltool/subgraph.py",
64 "cwltool/update.py",
65 "cwltool/utils.py",
66 "cwltool/validate_js.py",
67 "cwltool/workflow.py",
68 ]
69
70 from mypyc.build import mypycify
71
72 opt_level = os.getenv("MYPYC_OPT_LEVEL", "3")
73 ext_modules = mypycify(mypyc_targets, opt_level=opt_level)
74 else:
75 ext_modules = []
76
77 setup(
78 name="cwltool",
79 version="3.0",
80 description="Common workflow language reference implementation",
81 long_description=open(README).read(),
82 long_description_content_type="text/x-rst",
83 author="Common workflow language working group",
84 author_email="[email protected]",
85 url="https://github.com/common-workflow-language/cwltool",
86 download_url="https://github.com/common-workflow-language/cwltool",
87 ext_modules=ext_modules,
88 # platforms='', # empty as is conveyed by the classifier below
89 # license='', # empty as is conveyed by the classifier below
90 packages=["cwltool", "cwltool.tests"],
91 package_dir={"cwltool.tests": "tests"},
92 include_package_data=True,
93 install_requires=[
94 "setuptools",
95 "requests >= 2.6.1", # >= 2.6.1 to workaround
96 # https://github.com/ionrock/cachecontrol/issues/137
97 "ruamel.yaml >= 0.12.4, <= 0.16.5",
98 "rdflib >= 4.2.2, < 4.3.0",
99 "shellescape >= 3.4.1, < 3.5",
100 "schema-salad >= 7, < 8",
101 "mypy-extensions",
102 "psutil",
103 "prov == 1.5.1",
104 "bagit >= 1.6.4",
105 "typing-extensions",
106 "coloredlogs",
107 'pydot >= 1.4.1',
108 ],
109 extras_require={
110 ':python_version<"3.6"': ["typing >= 3.5.3"],
111 "deps": ["galaxy-tool-util"],
112 "docs": ["sphinx >= 2.2", "sphinx-rtd-theme"],
113 },
114 python_requires=">=3.5, <4",
115 setup_requires=PYTEST_RUNNER,
116 test_suite="tests",
117 tests_require=[
118 "pytest < 7",
119 "mock >= 2.0.0",
120 "pytest-mock >= 1.10.0",
121 "arcp >= 0.2.0",
122 "rdflib-jsonld >= 0.4.0",
123 ],
124 entry_points={"console_scripts": ["cwltool=cwltool.main:run"]},
125 zip_safe=True,
126 cmdclass={"egg_info": Tagger},
127 classifiers=[
128 "Development Status :: 5 - Production/Stable",
129 "Environment :: Console",
130 "Intended Audience :: Developers",
131 "Intended Audience :: Science/Research",
132 "Intended Audience :: Healthcare Industry",
133 "License :: OSI Approved :: Apache Software License",
134 "Natural Language :: English",
135 "Operating System :: MacOS :: MacOS X",
136 "Operating System :: POSIX",
137 "Operating System :: POSIX :: Linux",
138 "Operating System :: OS Independent",
139 "Operating System :: Microsoft :: Windows",
140 "Operating System :: Microsoft :: Windows :: Windows 10",
141 "Operating System :: Microsoft :: Windows :: Windows 8.1",
142 # 'Operating System :: Microsoft :: Windows :: Windows 8', # not tested
143 # 'Operating System :: Microsoft :: Windows :: Windows 7', # not tested
144 "Programming Language :: Python :: 3",
145 "Programming Language :: Python :: 3.5",
146 "Programming Language :: Python :: 3.6",
147 "Programming Language :: Python :: 3.7",
148 "Programming Language :: Python :: 3.8",
149 "Topic :: Scientific/Engineering",
150 "Topic :: Scientific/Engineering :: Bio-Informatics",
151 "Topic :: Scientific/Engineering :: Astronomy",
152 "Topic :: Scientific/Engineering :: Atmospheric Science",
153 "Topic :: Scientific/Engineering :: Information Analysis",
154 "Topic :: Scientific/Engineering :: Medical Science Apps.",
155 "Topic :: System :: Distributed Computing",
156 "Topic :: Utilities",
157 ],
158 )
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -107,11 +107,10 @@
'pydot >= 1.4.1',
],
extras_require={
- ':python_version<"3.6"': ["typing >= 3.5.3"],
"deps": ["galaxy-tool-util"],
"docs": ["sphinx >= 2.2", "sphinx-rtd-theme"],
},
- python_requires=">=3.5, <4",
+ python_requires=">=3.6, <4",
setup_requires=PYTEST_RUNNER,
test_suite="tests",
tests_require=[
@@ -142,7 +141,6 @@
# 'Operating System :: Microsoft :: Windows :: Windows 8', # not tested
# 'Operating System :: Microsoft :: Windows :: Windows 7', # not tested
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -107,11 +107,10 @@\n 'pydot >= 1.4.1',\n ],\n extras_require={\n- ':python_version<\"3.6\"': [\"typing >= 3.5.3\"],\n \"deps\": [\"galaxy-tool-util\"],\n \"docs\": [\"sphinx >= 2.2\", \"sphinx-rtd-theme\"],\n },\n- python_requires=\">=3.5, <4\",\n+ python_requires=\">=3.6, <4\",\n setup_requires=PYTEST_RUNNER,\n test_suite=\"tests\",\n tests_require=[\n@@ -142,7 +141,6 @@\n # 'Operating System :: Microsoft :: Windows :: Windows 8', # not tested\n # 'Operating System :: Microsoft :: Windows :: Windows 7', # not tested\n \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n", "issue": "drop Python3.5 support on 2020-09-13\nSomething to look forward to :-)\r\n\r\nhttps://devguide.python.org/#status-of-python-branches\r\n\r\n\r\nBranch | Schedule | Status | First release | End-of-life\r\n-- | -- | -- | -- | --\r\n3.5 | PEP 478 | security | 2015-09-13 | 2020-09-13\r\n\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\"\"\"Setup for the reference implementation of the CWL standards.\"\"\"\nimport os\nimport sys\n\nimport setuptools.command.egg_info as egg_info_cmd\nfrom setuptools import setup\n\nSETUP_DIR = os.path.dirname(__file__)\nREADME = os.path.join(SETUP_DIR, \"README.rst\")\n\ntry:\n import gittaggers\n\n Tagger = gittaggers.EggInfoFromGit\nexcept ImportError:\n Tagger = egg_info_cmd.egg_info\n\nNEEDS_PYTEST = {\"pytest\", \"test\", \"ptr\"}.intersection(sys.argv)\nPYTEST_RUNNER = [\"pytest-runner\", \"pytest-cov\"] if NEEDS_PYTEST else []\nUSE_MYPYC = False\n# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH\nif len(sys.argv) > 1 and sys.argv[1] == \"--use-mypyc\":\n sys.argv.pop(1)\n USE_MYPYC = True\nif os.getenv(\"CWLTOOL_USE_MYPYC\", None) == \"1\":\n USE_MYPYC = True\n\nif USE_MYPYC:\n mypyc_targets = [\n \"cwltool/argparser.py\",\n \"cwltool/builder.py\",\n \"cwltool/checker.py\",\n \"cwltool/command_line_tool.py\",\n # \"cwltool/context.py\", # monkeypatching\n \"cwltool/cwlrdf.py\",\n \"cwltool/docker_id.py\",\n \"cwltool/docker.py\",\n \"cwltool/udocker.py\",\n \"cwltool/errors.py\",\n \"cwltool/executors.py\",\n \"cwltool/expression.py\",\n \"cwltool/factory.py\",\n \"cwltool/flatten.py\",\n # \"cwltool/__init__.py\",\n \"cwltool/job.py\",\n \"cwltool/load_tool.py\",\n # \"cwltool/loghandler.py\", # so we can monkeypatch the logger from tests\n # \"cwltool/__main__.py\",\n \"cwltool/main.py\",\n \"cwltool/mutation.py\",\n \"cwltool/pack.py\",\n # \"cwltool/pathmapper.py\", # class PathMapper needs to be subclassable\n \"cwltool/process.py\",\n \"cwltool/procgenerator.py\",\n # \"cwltool/provenance.py\", # WritableBag is having issues\n \"cwltool/resolver.py\",\n # \"cwltool/sandboxjs.py\", # probably not speed critical, tests need to mock components\n \"cwltool/secrets.py\",\n \"cwltool/singularity.py\",\n \"cwltool/software_requirements.py\",\n \"cwltool/stdfsaccess.py\",\n \"cwltool/subgraph.py\",\n \"cwltool/update.py\",\n \"cwltool/utils.py\",\n \"cwltool/validate_js.py\",\n \"cwltool/workflow.py\",\n ]\n\n from mypyc.build import mypycify\n\n opt_level = os.getenv(\"MYPYC_OPT_LEVEL\", \"3\")\n ext_modules = mypycify(mypyc_targets, opt_level=opt_level)\nelse:\n ext_modules = []\n\nsetup(\n name=\"cwltool\",\n version=\"3.0\",\n description=\"Common workflow 
language reference implementation\",\n long_description=open(README).read(),\n long_description_content_type=\"text/x-rst\",\n author=\"Common workflow language working group\",\n author_email=\"[email protected]\",\n url=\"https://github.com/common-workflow-language/cwltool\",\n download_url=\"https://github.com/common-workflow-language/cwltool\",\n ext_modules=ext_modules,\n # platforms='', # empty as is conveyed by the classifier below\n # license='', # empty as is conveyed by the classifier below\n packages=[\"cwltool\", \"cwltool.tests\"],\n package_dir={\"cwltool.tests\": \"tests\"},\n include_package_data=True,\n install_requires=[\n \"setuptools\",\n \"requests >= 2.6.1\", # >= 2.6.1 to workaround\n # https://github.com/ionrock/cachecontrol/issues/137\n \"ruamel.yaml >= 0.12.4, <= 0.16.5\",\n \"rdflib >= 4.2.2, < 4.3.0\",\n \"shellescape >= 3.4.1, < 3.5\",\n \"schema-salad >= 7, < 8\",\n \"mypy-extensions\",\n \"psutil\",\n \"prov == 1.5.1\",\n \"bagit >= 1.6.4\",\n \"typing-extensions\",\n \"coloredlogs\",\n 'pydot >= 1.4.1',\n ],\n extras_require={\n ':python_version<\"3.6\"': [\"typing >= 3.5.3\"],\n \"deps\": [\"galaxy-tool-util\"],\n \"docs\": [\"sphinx >= 2.2\", \"sphinx-rtd-theme\"],\n },\n python_requires=\">=3.5, <4\",\n setup_requires=PYTEST_RUNNER,\n test_suite=\"tests\",\n tests_require=[\n \"pytest < 7\",\n \"mock >= 2.0.0\",\n \"pytest-mock >= 1.10.0\",\n \"arcp >= 0.2.0\",\n \"rdflib-jsonld >= 0.4.0\",\n ],\n entry_points={\"console_scripts\": [\"cwltool=cwltool.main:run\"]},\n zip_safe=True,\n cmdclass={\"egg_info\": Tagger},\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Healthcare Industry\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: OS Independent\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: Microsoft :: Windows :: Windows 10\",\n \"Operating System :: Microsoft :: Windows :: Windows 8.1\",\n # 'Operating System :: Microsoft :: Windows :: Windows 8', # not tested\n # 'Operating System :: Microsoft :: Windows :: Windows 7', # not tested\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n \"Topic :: Scientific/Engineering :: Astronomy\",\n \"Topic :: Scientific/Engineering :: Atmospheric Science\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n \"Topic :: System :: Distributed Computing\",\n \"Topic :: Utilities\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\"\"\"Setup for the reference implementation of the CWL standards.\"\"\"\nimport os\nimport sys\n\nimport setuptools.command.egg_info as egg_info_cmd\nfrom setuptools import setup\n\nSETUP_DIR = os.path.dirname(__file__)\nREADME = os.path.join(SETUP_DIR, \"README.rst\")\n\ntry:\n import gittaggers\n\n Tagger = gittaggers.EggInfoFromGit\nexcept ImportError:\n Tagger = egg_info_cmd.egg_info\n\nNEEDS_PYTEST = {\"pytest\", \"test\", 
\"ptr\"}.intersection(sys.argv)\nPYTEST_RUNNER = [\"pytest-runner\", \"pytest-cov\"] if NEEDS_PYTEST else []\nUSE_MYPYC = False\n# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH\nif len(sys.argv) > 1 and sys.argv[1] == \"--use-mypyc\":\n sys.argv.pop(1)\n USE_MYPYC = True\nif os.getenv(\"CWLTOOL_USE_MYPYC\", None) == \"1\":\n USE_MYPYC = True\n\nif USE_MYPYC:\n mypyc_targets = [\n \"cwltool/argparser.py\",\n \"cwltool/builder.py\",\n \"cwltool/checker.py\",\n \"cwltool/command_line_tool.py\",\n # \"cwltool/context.py\", # monkeypatching\n \"cwltool/cwlrdf.py\",\n \"cwltool/docker_id.py\",\n \"cwltool/docker.py\",\n \"cwltool/udocker.py\",\n \"cwltool/errors.py\",\n \"cwltool/executors.py\",\n \"cwltool/expression.py\",\n \"cwltool/factory.py\",\n \"cwltool/flatten.py\",\n # \"cwltool/__init__.py\",\n \"cwltool/job.py\",\n \"cwltool/load_tool.py\",\n # \"cwltool/loghandler.py\", # so we can monkeypatch the logger from tests\n # \"cwltool/__main__.py\",\n \"cwltool/main.py\",\n \"cwltool/mutation.py\",\n \"cwltool/pack.py\",\n # \"cwltool/pathmapper.py\", # class PathMapper needs to be subclassable\n \"cwltool/process.py\",\n \"cwltool/procgenerator.py\",\n # \"cwltool/provenance.py\", # WritableBag is having issues\n \"cwltool/resolver.py\",\n # \"cwltool/sandboxjs.py\", # probably not speed critical, tests need to mock components\n \"cwltool/secrets.py\",\n \"cwltool/singularity.py\",\n \"cwltool/software_requirements.py\",\n \"cwltool/stdfsaccess.py\",\n \"cwltool/subgraph.py\",\n \"cwltool/update.py\",\n \"cwltool/utils.py\",\n \"cwltool/validate_js.py\",\n \"cwltool/workflow.py\",\n ]\n\n from mypyc.build import mypycify\n\n opt_level = os.getenv(\"MYPYC_OPT_LEVEL\", \"3\")\n ext_modules = mypycify(mypyc_targets, opt_level=opt_level)\nelse:\n ext_modules = []\n\nsetup(\n name=\"cwltool\",\n version=\"3.0\",\n description=\"Common workflow language reference implementation\",\n long_description=open(README).read(),\n long_description_content_type=\"text/x-rst\",\n author=\"Common workflow language working group\",\n author_email=\"[email protected]\",\n url=\"https://github.com/common-workflow-language/cwltool\",\n download_url=\"https://github.com/common-workflow-language/cwltool\",\n ext_modules=ext_modules,\n # platforms='', # empty as is conveyed by the classifier below\n # license='', # empty as is conveyed by the classifier below\n packages=[\"cwltool\", \"cwltool.tests\"],\n package_dir={\"cwltool.tests\": \"tests\"},\n include_package_data=True,\n install_requires=[\n \"setuptools\",\n \"requests >= 2.6.1\", # >= 2.6.1 to workaround\n # https://github.com/ionrock/cachecontrol/issues/137\n \"ruamel.yaml >= 0.12.4, <= 0.16.5\",\n \"rdflib >= 4.2.2, < 4.3.0\",\n \"shellescape >= 3.4.1, < 3.5\",\n \"schema-salad >= 7, < 8\",\n \"mypy-extensions\",\n \"psutil\",\n \"prov == 1.5.1\",\n \"bagit >= 1.6.4\",\n \"typing-extensions\",\n \"coloredlogs\",\n 'pydot >= 1.4.1',\n ],\n extras_require={\n \"deps\": [\"galaxy-tool-util\"],\n \"docs\": [\"sphinx >= 2.2\", \"sphinx-rtd-theme\"],\n },\n python_requires=\">=3.6, <4\",\n setup_requires=PYTEST_RUNNER,\n test_suite=\"tests\",\n tests_require=[\n \"pytest < 7\",\n \"mock >= 2.0.0\",\n \"pytest-mock >= 1.10.0\",\n \"arcp >= 0.2.0\",\n \"rdflib-jsonld >= 0.4.0\",\n ],\n entry_points={\"console_scripts\": [\"cwltool=cwltool.main:run\"]},\n zip_safe=True,\n cmdclass={\"egg_info\": Tagger},\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: 
Developers\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Healthcare Industry\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: OS Independent\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: Microsoft :: Windows :: Windows 10\",\n \"Operating System :: Microsoft :: Windows :: Windows 8.1\",\n # 'Operating System :: Microsoft :: Windows :: Windows 8', # not tested\n # 'Operating System :: Microsoft :: Windows :: Windows 7', # not tested\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n \"Topic :: Scientific/Engineering :: Astronomy\",\n \"Topic :: Scientific/Engineering :: Atmospheric Science\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n \"Topic :: System :: Distributed Computing\",\n \"Topic :: Utilities\",\n ],\n)\n", "path": "setup.py"}]}
| 2,287 | 263 |
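The golden diff above is pure packaging metadata: once `python_requires=">=3.6, <4"` is declared, pip refuses to install the project on Python 3.5. For illustration only, a rough runtime equivalent of that bound would be:

```python
import sys

# Approximates what the new python_requires=">=3.6, <4" metadata enforces at
# install time; pip applies this from the package metadata, not from runtime code.
if not ((3, 6) <= sys.version_info[:2] < (4, 0)):
    sys.exit("cwltool 3.x requires Python >= 3.6 and < 4")
```

Dropping the `':python_version<"3.6"'` extra is consistent with that floor, since that marker only ever applied to interpreters older than 3.6.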
gh_patches_debug_41075
|
rasdani/github-patches
|
git_diff
|
vyperlang__vyper-828
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Solidity Compatible ERC20 broken
The [Solidity compatible ERC20 token](https://github.com/ethereum/vyper/blob/master/examples/tokens/ERC20_solidity_compatible/ERC20.v.py) no longer compiles, since it was not updated after the removal of separate uint256 math functions. This is a super easy fix. I can do it later in the week if no one gets to it before then.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/tokens/ERC20_solidity_compatible/ERC20.v.py`
Content:
```
1 # Solidity-Compatible EIP20/ERC20 Token
2 # Implements https://github.com/ethereum/EIPs/blob/master/EIPS/eip-20-token-standard.md
3 # Author: Phil Daian
4
5 # The use of the uint256 datatype as in this token is not
6 # recommended, as it can pose security risks.
7
8 # This token is intended as a proof of concept towards
9 # language interoperability and not for production use.
10
11 # Events issued by the contract
12 Transfer: event({_from: indexed(address), _to: indexed(address), _value: uint256})
13 Approval: event({_owner: indexed(address), _spender: indexed(address), _value: uint256})
14
15 balances: uint256[address]
16 allowances: (uint256[address])[address]
17 num_issued: uint256
18
19 @public
20 @payable
21 def deposit():
22 _value: uint256 = convert(msg.value, 'uint256')
23 _sender: address = msg.sender
24 self.balances[_sender] = uint256_add(self.balances[_sender], _value)
25 self.num_issued = uint256_add(self.num_issued, _value)
26 # Fire deposit event as transfer from 0x0
27 log.Transfer(0x0000000000000000000000000000000000000000, _sender, _value)
28
29 @public
30 def withdraw(_value : uint256) -> bool:
31 _sender: address = msg.sender
32 # Make sure sufficient funds are present, op will not underflow supply
33 # implicitly through overflow protection
34 self.balances[_sender] = uint256_sub(self.balances[_sender], _value)
35 self.num_issued = uint256_sub(self.num_issued, _value)
36 send(_sender, as_wei_value(convert(_value, 'int128'), 'wei'))
37 # Fire withdraw event as transfer to 0x0
38 log.Transfer(_sender, 0x0000000000000000000000000000000000000000, _value)
39 return true
40
41 @public
42 @constant
43 def totalSupply() -> uint256:
44 return self.num_issued
45
46 @public
47 @constant
48 def balanceOf(_owner : address) -> uint256:
49 return self.balances[_owner]
50
51 @public
52 def transfer(_to : address, _value : uint256) -> bool:
53 _sender: address = msg.sender
54 # Make sure sufficient funds are present implicitly through overflow protection
55 self.balances[_sender] = uint256_sub(self.balances[_sender], _value)
56 self.balances[_to] = uint256_add(self.balances[_to], _value)
57 # Fire transfer event
58 log.Transfer(_sender, _to, _value)
59 return true
60
61 @public
62 def transferFrom(_from : address, _to : address, _value : uint256) -> bool:
63 _sender: address = msg.sender
64 allowance: uint256 = self.allowances[_from][_sender]
65 # Make sure sufficient funds/allowance are present implicitly through overflow protection
66 self.balances[_from] = uint256_sub(self.balances[_from], _value)
67 self.balances[_to] = uint256_add(self.balances[_to], _value)
68 self.allowances[_from][_sender] = uint256_sub(allowance, _value)
69 # Fire transfer event
70 log.Transfer(_from, _to, _value)
71 return true
72
73 @public
74 def approve(_spender : address, _value : uint256) -> bool:
75 _sender: address = msg.sender
76 self.allowances[_sender][_spender] = _value
77 # Fire approval event
78 log.Approval(_sender, _spender, _value)
79 return true
80
81 @public
82 @constant
83 def allowance(_owner : address, _spender : address) -> uint256:
84 return self.allowances[_owner][_spender]
85
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/tokens/ERC20_solidity_compatible/ERC20.v.py b/examples/tokens/ERC20_solidity_compatible/ERC20.v.py
--- a/examples/tokens/ERC20_solidity_compatible/ERC20.v.py
+++ b/examples/tokens/ERC20_solidity_compatible/ERC20.v.py
@@ -21,8 +21,8 @@
def deposit():
_value: uint256 = convert(msg.value, 'uint256')
_sender: address = msg.sender
- self.balances[_sender] = uint256_add(self.balances[_sender], _value)
- self.num_issued = uint256_add(self.num_issued, _value)
+ self.balances[_sender] = self.balances[_sender] + _value
+ self.num_issued = self.num_issued + _value
# Fire deposit event as transfer from 0x0
log.Transfer(0x0000000000000000000000000000000000000000, _sender, _value)
@@ -31,12 +31,12 @@
_sender: address = msg.sender
# Make sure sufficient funds are present, op will not underflow supply
# implicitly through overflow protection
- self.balances[_sender] = uint256_sub(self.balances[_sender], _value)
- self.num_issued = uint256_sub(self.num_issued, _value)
+ self.balances[_sender] = self.balances[_sender] - _value
+ self.num_issued = self.num_issued - _value
send(_sender, as_wei_value(convert(_value, 'int128'), 'wei'))
# Fire withdraw event as transfer to 0x0
log.Transfer(_sender, 0x0000000000000000000000000000000000000000, _value)
- return true
+ return True
@public
@constant
@@ -52,23 +52,23 @@
def transfer(_to : address, _value : uint256) -> bool:
_sender: address = msg.sender
# Make sure sufficient funds are present implicitly through overflow protection
- self.balances[_sender] = uint256_sub(self.balances[_sender], _value)
- self.balances[_to] = uint256_add(self.balances[_to], _value)
+ self.balances[_sender] = self.balances[_sender] - _value
+ self.balances[_to] = self.balances[_to] + _value
# Fire transfer event
log.Transfer(_sender, _to, _value)
- return true
+ return True
@public
def transferFrom(_from : address, _to : address, _value : uint256) -> bool:
_sender: address = msg.sender
allowance: uint256 = self.allowances[_from][_sender]
# Make sure sufficient funds/allowance are present implicitly through overflow protection
- self.balances[_from] = uint256_sub(self.balances[_from], _value)
- self.balances[_to] = uint256_add(self.balances[_to], _value)
- self.allowances[_from][_sender] = uint256_sub(allowance, _value)
+ self.balances[_from] = self.balances[_from] - _value
+ self.balances[_to] = self.balances[_to] + _value
+ self.allowances[_from][_sender] = allowance - _value
# Fire transfer event
log.Transfer(_from, _to, _value)
- return true
+ return True
@public
def approve(_spender : address, _value : uint256) -> bool:
@@ -76,7 +76,7 @@
self.allowances[_sender][_spender] = _value
# Fire approval event
log.Approval(_sender, _spender, _value)
- return true
+ return True
@public
@constant
|
{"golden_diff": "diff --git a/examples/tokens/ERC20_solidity_compatible/ERC20.v.py b/examples/tokens/ERC20_solidity_compatible/ERC20.v.py\n--- a/examples/tokens/ERC20_solidity_compatible/ERC20.v.py\n+++ b/examples/tokens/ERC20_solidity_compatible/ERC20.v.py\n@@ -21,8 +21,8 @@\n def deposit():\n _value: uint256 = convert(msg.value, 'uint256')\n _sender: address = msg.sender\n- self.balances[_sender] = uint256_add(self.balances[_sender], _value)\n- self.num_issued = uint256_add(self.num_issued, _value)\n+ self.balances[_sender] = self.balances[_sender] + _value\n+ self.num_issued = self.num_issued + _value\n # Fire deposit event as transfer from 0x0\n log.Transfer(0x0000000000000000000000000000000000000000, _sender, _value)\n \n@@ -31,12 +31,12 @@\n _sender: address = msg.sender\n # Make sure sufficient funds are present, op will not underflow supply\n # implicitly through overflow protection\n- self.balances[_sender] = uint256_sub(self.balances[_sender], _value)\n- self.num_issued = uint256_sub(self.num_issued, _value)\n+ self.balances[_sender] = self.balances[_sender] - _value\n+ self.num_issued = self.num_issued - _value\n send(_sender, as_wei_value(convert(_value, 'int128'), 'wei'))\n # Fire withdraw event as transfer to 0x0\n log.Transfer(_sender, 0x0000000000000000000000000000000000000000, _value)\n- return true\n+ return True\n \n @public\n @constant\n@@ -52,23 +52,23 @@\n def transfer(_to : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n # Make sure sufficient funds are present implicitly through overflow protection\n- self.balances[_sender] = uint256_sub(self.balances[_sender], _value)\n- self.balances[_to] = uint256_add(self.balances[_to], _value)\n+ self.balances[_sender] = self.balances[_sender] - _value\n+ self.balances[_to] = self.balances[_to] + _value\n # Fire transfer event\n log.Transfer(_sender, _to, _value)\n- return true\n+ return True\n \n @public\n def transferFrom(_from : address, _to : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n allowance: uint256 = self.allowances[_from][_sender]\n # Make sure sufficient funds/allowance are present implicitly through overflow protection\n- self.balances[_from] = uint256_sub(self.balances[_from], _value)\n- self.balances[_to] = uint256_add(self.balances[_to], _value)\n- self.allowances[_from][_sender] = uint256_sub(allowance, _value)\n+ self.balances[_from] = self.balances[_from] - _value\n+ self.balances[_to] = self.balances[_to] + _value\n+ self.allowances[_from][_sender] = allowance - _value\n # Fire transfer event\n log.Transfer(_from, _to, _value)\n- return true\n+ return True\n \n @public\n def approve(_spender : address, _value : uint256) -> bool:\n@@ -76,7 +76,7 @@\n self.allowances[_sender][_spender] = _value\n # Fire approval event\n log.Approval(_sender, _spender, _value)\n- return true\n+ return True\n \n @public\n @constant\n", "issue": "Solidity Compatible ERC20 broken\nThe [Solidity compatible ERC20 token](https://github.com/ethereum/vyper/blob/master/examples/tokens/ERC20_solidity_compatible/ERC20.v.py) no longer compiles, since it was not updated after the removal of separate uint256 math functions. This is a super easy fix. I can do it later in the week if no one gets to it before then. 
\n", "before_files": [{"content": "# Solidity-Compatible EIP20/ERC20 Token\n# Implements https://github.com/ethereum/EIPs/blob/master/EIPS/eip-20-token-standard.md\n# Author: Phil Daian\n\n# The use of the uint256 datatype as in this token is not\n# recommended, as it can pose security risks.\n\n# This token is intended as a proof of concept towards\n# language interoperability and not for production use.\n\n# Events issued by the contract\nTransfer: event({_from: indexed(address), _to: indexed(address), _value: uint256})\nApproval: event({_owner: indexed(address), _spender: indexed(address), _value: uint256})\n\nbalances: uint256[address]\nallowances: (uint256[address])[address]\nnum_issued: uint256\n\n@public\n@payable\ndef deposit():\n _value: uint256 = convert(msg.value, 'uint256')\n _sender: address = msg.sender\n self.balances[_sender] = uint256_add(self.balances[_sender], _value)\n self.num_issued = uint256_add(self.num_issued, _value)\n # Fire deposit event as transfer from 0x0\n log.Transfer(0x0000000000000000000000000000000000000000, _sender, _value)\n\n@public\ndef withdraw(_value : uint256) -> bool:\n _sender: address = msg.sender\n # Make sure sufficient funds are present, op will not underflow supply\n # implicitly through overflow protection\n self.balances[_sender] = uint256_sub(self.balances[_sender], _value)\n self.num_issued = uint256_sub(self.num_issued, _value)\n send(_sender, as_wei_value(convert(_value, 'int128'), 'wei'))\n # Fire withdraw event as transfer to 0x0\n log.Transfer(_sender, 0x0000000000000000000000000000000000000000, _value)\n return true\n\n@public\n@constant\ndef totalSupply() -> uint256:\n return self.num_issued\n\n@public\n@constant\ndef balanceOf(_owner : address) -> uint256:\n return self.balances[_owner]\n\n@public\ndef transfer(_to : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n # Make sure sufficient funds are present implicitly through overflow protection\n self.balances[_sender] = uint256_sub(self.balances[_sender], _value)\n self.balances[_to] = uint256_add(self.balances[_to], _value)\n # Fire transfer event\n log.Transfer(_sender, _to, _value)\n return true\n\n@public\ndef transferFrom(_from : address, _to : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n allowance: uint256 = self.allowances[_from][_sender]\n # Make sure sufficient funds/allowance are present implicitly through overflow protection\n self.balances[_from] = uint256_sub(self.balances[_from], _value)\n self.balances[_to] = uint256_add(self.balances[_to], _value)\n self.allowances[_from][_sender] = uint256_sub(allowance, _value)\n # Fire transfer event\n log.Transfer(_from, _to, _value)\n return true\n\n@public\ndef approve(_spender : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n self.allowances[_sender][_spender] = _value\n # Fire approval event\n log.Approval(_sender, _spender, _value)\n return true\n\n@public\n@constant\ndef allowance(_owner : address, _spender : address) -> uint256:\n return self.allowances[_owner][_spender]\n\n", "path": "examples/tokens/ERC20_solidity_compatible/ERC20.v.py"}], "after_files": [{"content": "# Solidity-Compatible EIP20/ERC20 Token\n# Implements https://github.com/ethereum/EIPs/blob/master/EIPS/eip-20-token-standard.md\n# Author: Phil Daian\n\n# The use of the uint256 datatype as in this token is not\n# recommended, as it can pose security risks.\n\n# This token is intended as a proof of concept towards\n# language interoperability and not for production use.\n\n# Events 
issued by the contract\nTransfer: event({_from: indexed(address), _to: indexed(address), _value: uint256})\nApproval: event({_owner: indexed(address), _spender: indexed(address), _value: uint256})\n\nbalances: uint256[address]\nallowances: (uint256[address])[address]\nnum_issued: uint256\n\n@public\n@payable\ndef deposit():\n _value: uint256 = convert(msg.value, 'uint256')\n _sender: address = msg.sender\n self.balances[_sender] = self.balances[_sender] + _value\n self.num_issued = self.num_issued + _value\n # Fire deposit event as transfer from 0x0\n log.Transfer(0x0000000000000000000000000000000000000000, _sender, _value)\n\n@public\ndef withdraw(_value : uint256) -> bool:\n _sender: address = msg.sender\n # Make sure sufficient funds are present, op will not underflow supply\n # implicitly through overflow protection\n self.balances[_sender] = self.balances[_sender] - _value\n self.num_issued = self.num_issued - _value\n send(_sender, as_wei_value(convert(_value, 'int128'), 'wei'))\n # Fire withdraw event as transfer to 0x0\n log.Transfer(_sender, 0x0000000000000000000000000000000000000000, _value)\n return True\n\n@public\n@constant\ndef totalSupply() -> uint256:\n return self.num_issued\n\n@public\n@constant\ndef balanceOf(_owner : address) -> uint256:\n return self.balances[_owner]\n\n@public\ndef transfer(_to : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n # Make sure sufficient funds are present implicitly through overflow protection\n self.balances[_sender] = self.balances[_sender] - _value\n self.balances[_to] = self.balances[_to] + _value\n # Fire transfer event\n log.Transfer(_sender, _to, _value)\n return True\n\n@public\ndef transferFrom(_from : address, _to : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n allowance: uint256 = self.allowances[_from][_sender]\n # Make sure sufficient funds/allowance are present implicitly through overflow protection\n self.balances[_from] = self.balances[_from] - _value\n self.balances[_to] = self.balances[_to] + _value\n self.allowances[_from][_sender] = allowance - _value\n # Fire transfer event\n log.Transfer(_from, _to, _value)\n return True\n\n@public\ndef approve(_spender : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n self.allowances[_sender][_spender] = _value\n # Fire approval event\n log.Approval(_sender, _spender, _value)\n return True\n\n@public\n@constant\ndef allowance(_owner : address, _spender : address) -> uint256:\n return self.allowances[_owner][_spender]\n\n", "path": "examples/tokens/ERC20_solidity_compatible/ERC20.v.py"}]}
| 1,475 | 989 |
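The fix above is mechanical: every `uint256_add`/`uint256_sub` call becomes an infix operator and `true` becomes `True`. A throwaway helper along these lines (hypothetical, and only safe for the simple two-argument calls that appear in this contract) could apply the same rewrite:

```python
import re

# Mirrors the golden diff's mechanical changes:
# uint256_add(a, b) -> a + b, uint256_sub(a, b) -> a - b, return true -> return True.
# Not a general parser: it assumes no commas or nested parentheses inside the arguments.
REWRITES = [
    (re.compile(r"uint256_add\(([^,]+),\s*([^)]+)\)"), r"\1 + \2"),
    (re.compile(r"uint256_sub\(([^,]+),\s*([^)]+)\)"), r"\1 - \2"),
    (re.compile(r"\breturn true\b"), "return True"),
]

def modernize(source: str) -> str:
    for pattern, replacement in REWRITES:
        source = pattern.sub(replacement, source)
    return source

print(modernize("self.num_issued = uint256_add(self.num_issued, _value)"))
# -> self.num_issued = self.num_issued + _value
```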
gh_patches_debug_29690
|
rasdani/github-patches
|
git_diff
|
openstates__openstates-scrapers-1455
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MA: committee scraper for 2017
State: MA
The committee scraper says that it is skipping every page; I believe the site was rewritten, so the scraper will need a complete rewrite.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/ma/committees.py`
Content:
```
1 from billy.scrape.committees import CommitteeScraper, Committee
2
3 import lxml.html
4
5
6 class MACommitteeScraper(CommitteeScraper):
7 jurisdiction = 'ma'
8
9 def scrape(self, term, chambers):
10 page_types = []
11 if 'upper' in chambers:
12 page_types += ['Senate', 'Joint']
13 if 'lower' in chambers:
14 page_types += ['House']
15 chamber_mapping = {'Senate': 'upper',
16 'House': 'lower',
17 'Joint': 'joint'}
18
19 foundComms = []
20
21 for page_type in page_types:
22 url = 'http://www.malegislature.gov/Committees/' + page_type
23
24 html = self.get(url, verify=False).text
25 doc = lxml.html.fromstring(html)
26 doc.make_links_absolute('http://www.malegislature.gov')
27
28 for com_url in doc.xpath('//ul[@class="committeeList"]/li/a/@href'):
29 chamber = chamber_mapping[page_type]
30 self.scrape_committee(chamber, com_url)
31
32 def scrape_committee(self, chamber, url):
33 html = self.get(url, verify=False).text
34 doc = lxml.html.fromstring(html)
35
36 name = doc.xpath('//span[@class="committeeShortName"]/text()')
37 if len(name) == 0:
38 self.warning("Had to skip this malformed page.")
39 return
40 # Because of http://www.malegislature.gov/Committees/Senate/S29 this
41 # XXX: hack had to be pushed in. Remove me ASAP. This just skips
42 # malformed pages.
43
44 name = name[0]
45 com = Committee(chamber, name)
46 com.add_source(url)
47
48 # get both titles and names, order is consistent
49 titles = doc.xpath('//p[@class="rankingMemberTitle"]/text()')
50 names = doc.xpath('//p[@class="rankingMemberName"]/a/text()')
51
52 for title, name in zip(titles, names):
53 com.add_member(name, title)
54
55 for member in doc.xpath('//div[@class="committeeRegularMembers"]//a/text()'):
56 com.add_member(member)
57
58 if com['members']:
59 self.save_committee(com)
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/openstates/ma/committees.py b/openstates/ma/committees.py
--- a/openstates/ma/committees.py
+++ b/openstates/ma/committees.py
@@ -16,8 +16,6 @@
'House': 'lower',
'Joint': 'joint'}
- foundComms = []
-
for page_type in page_types:
url = 'http://www.malegislature.gov/Committees/' + page_type
@@ -33,27 +31,15 @@
html = self.get(url, verify=False).text
doc = lxml.html.fromstring(html)
- name = doc.xpath('//span[@class="committeeShortName"]/text()')
- if len(name) == 0:
- self.warning("Had to skip this malformed page.")
- return
- # Because of http://www.malegislature.gov/Committees/Senate/S29 this
- # XXX: hack had to be pushed in. Remove me ASAP. This just skips
- # malformed pages.
-
- name = name[0]
+ name = doc.xpath('//title/text()')[0]
com = Committee(chamber, name)
com.add_source(url)
- # get both titles and names, order is consistent
- titles = doc.xpath('//p[@class="rankingMemberTitle"]/text()')
- names = doc.xpath('//p[@class="rankingMemberName"]/a/text()')
-
- for title, name in zip(titles, names):
- com.add_member(name, title)
-
- for member in doc.xpath('//div[@class="committeeRegularMembers"]//a/text()'):
- com.add_member(member)
+ members = doc.xpath('//a[contains(@href, "/Legislators/Profile")]')
+ for member in members:
+ title = member.xpath('../span')
+ role = title[0].text.lower() if title else 'member'
+ com.add_member(member.text, role)
if com['members']:
self.save_committee(com)
|
{"golden_diff": "diff --git a/openstates/ma/committees.py b/openstates/ma/committees.py\n--- a/openstates/ma/committees.py\n+++ b/openstates/ma/committees.py\n@@ -16,8 +16,6 @@\n 'House': 'lower',\n 'Joint': 'joint'}\n \n- foundComms = []\n-\n for page_type in page_types:\n url = 'http://www.malegislature.gov/Committees/' + page_type\n \n@@ -33,27 +31,15 @@\n html = self.get(url, verify=False).text\n doc = lxml.html.fromstring(html)\n \n- name = doc.xpath('//span[@class=\"committeeShortName\"]/text()')\n- if len(name) == 0:\n- self.warning(\"Had to skip this malformed page.\")\n- return\n- # Because of http://www.malegislature.gov/Committees/Senate/S29 this\n- # XXX: hack had to be pushed in. Remove me ASAP. This just skips\n- # malformed pages.\n-\n- name = name[0]\n+ name = doc.xpath('//title/text()')[0]\n com = Committee(chamber, name)\n com.add_source(url)\n \n- # get both titles and names, order is consistent\n- titles = doc.xpath('//p[@class=\"rankingMemberTitle\"]/text()')\n- names = doc.xpath('//p[@class=\"rankingMemberName\"]/a/text()')\n-\n- for title, name in zip(titles, names):\n- com.add_member(name, title)\n-\n- for member in doc.xpath('//div[@class=\"committeeRegularMembers\"]//a/text()'):\n- com.add_member(member)\n+ members = doc.xpath('//a[contains(@href, \"/Legislators/Profile\")]')\n+ for member in members:\n+ title = member.xpath('../span')\n+ role = title[0].text.lower() if title else 'member'\n+ com.add_member(member.text, role)\n \n if com['members']:\n self.save_committee(com)\n", "issue": "MA: committee scraper for 2017\nState: MA\r\n\r\nsays that it is skipping every page, I believe the site was rewritten and so will need a complete rewrite\n", "before_files": [{"content": "from billy.scrape.committees import CommitteeScraper, Committee\n\nimport lxml.html\n\n\nclass MACommitteeScraper(CommitteeScraper):\n jurisdiction = 'ma'\n\n def scrape(self, term, chambers):\n page_types = []\n if 'upper' in chambers:\n page_types += ['Senate', 'Joint']\n if 'lower' in chambers:\n page_types += ['House']\n chamber_mapping = {'Senate': 'upper',\n 'House': 'lower',\n 'Joint': 'joint'}\n\n foundComms = []\n\n for page_type in page_types:\n url = 'http://www.malegislature.gov/Committees/' + page_type\n\n html = self.get(url, verify=False).text\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute('http://www.malegislature.gov')\n\n for com_url in doc.xpath('//ul[@class=\"committeeList\"]/li/a/@href'):\n chamber = chamber_mapping[page_type]\n self.scrape_committee(chamber, com_url)\n\n def scrape_committee(self, chamber, url):\n html = self.get(url, verify=False).text\n doc = lxml.html.fromstring(html)\n\n name = doc.xpath('//span[@class=\"committeeShortName\"]/text()')\n if len(name) == 0:\n self.warning(\"Had to skip this malformed page.\")\n return\n # Because of http://www.malegislature.gov/Committees/Senate/S29 this\n # XXX: hack had to be pushed in. Remove me ASAP. 
This just skips\n # malformed pages.\n\n name = name[0]\n com = Committee(chamber, name)\n com.add_source(url)\n\n # get both titles and names, order is consistent\n titles = doc.xpath('//p[@class=\"rankingMemberTitle\"]/text()')\n names = doc.xpath('//p[@class=\"rankingMemberName\"]/a/text()')\n\n for title, name in zip(titles, names):\n com.add_member(name, title)\n\n for member in doc.xpath('//div[@class=\"committeeRegularMembers\"]//a/text()'):\n com.add_member(member)\n\n if com['members']:\n self.save_committee(com)\n", "path": "openstates/ma/committees.py"}], "after_files": [{"content": "from billy.scrape.committees import CommitteeScraper, Committee\n\nimport lxml.html\n\n\nclass MACommitteeScraper(CommitteeScraper):\n jurisdiction = 'ma'\n\n def scrape(self, term, chambers):\n page_types = []\n if 'upper' in chambers:\n page_types += ['Senate', 'Joint']\n if 'lower' in chambers:\n page_types += ['House']\n chamber_mapping = {'Senate': 'upper',\n 'House': 'lower',\n 'Joint': 'joint'}\n\n for page_type in page_types:\n url = 'http://www.malegislature.gov/Committees/' + page_type\n\n html = self.get(url, verify=False).text\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute('http://www.malegislature.gov')\n\n for com_url in doc.xpath('//ul[@class=\"committeeList\"]/li/a/@href'):\n chamber = chamber_mapping[page_type]\n self.scrape_committee(chamber, com_url)\n\n def scrape_committee(self, chamber, url):\n html = self.get(url, verify=False).text\n doc = lxml.html.fromstring(html)\n\n name = doc.xpath('//title/text()')[0]\n com = Committee(chamber, name)\n com.add_source(url)\n\n members = doc.xpath('//a[contains(@href, \"/Legislators/Profile\")]')\n for member in members:\n title = member.xpath('../span')\n role = title[0].text.lower() if title else 'member'\n com.add_member(member.text, role)\n\n if com['members']:\n self.save_committee(com)\n", "path": "openstates/ma/committees.py"}]}
| 893 | 456 |
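
For readers skimming the row above, a minimal runnable sketch of the member-extraction XPath from its golden diff; the HTML fragment, list markup and names are invented for illustration, only the `/Legislators/Profile` href filter and the `../span` role lookup come from the patch.

```python
# Sketch of the committee-member extraction used in the MA committees patch.
# The HTML below is a made-up stand-in for the real malegislature.gov page.
import lxml.html

html = """
<ul>
  <li><span>Chair</span><a href="/Legislators/Profile/ABC1">Jane Doe</a></li>
  <li><a href="/Legislators/Profile/XYZ2">John Roe</a></li>
</ul>
"""

doc = lxml.html.fromstring(html)
for member in doc.xpath('//a[contains(@href, "/Legislators/Profile")]'):
    title = member.xpath('../span')           # optional role label next to the link
    role = title[0].text.lower() if title else 'member'
    print(member.text, role)                  # -> "Jane Doe chair", "John Roe member"
```
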
gh_patches_debug_27810
|
rasdani/github-patches
|
git_diff
|
cocotb__cocotb-1898
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Provide friendlier handling of invalid COCOTB_LOG_LEVEL
When running with `COCOTB_LOG_LEVEL=debug` I get the stack trace shown below.
We should be able to improve the user experience a bit by:
- Converting values to ALLCAPS, i.e. debug becomes DEBUG
- Providing a more helpful error message with the allowed options instead of a stack trace.
```
sim_build/fifo
-.--ns INFO cocotb.gpi ..mbed/gpi_embed.cpp:74 in set_program_name_in_venv Did not detect Python virtual environment. Using system-wide Python interpreter
-.--ns INFO cocotb.gpi ../gpi/GpiCommon.cpp:105 in gpi_print_registered_impl VPI registered
-.--ns INFO cocotb.gpi ..mbed/gpi_embed.cpp:245 in embed_sim_init Python interpreter initialized and cocotb loaded!
Traceback (most recent call last):
File "/home/philipp/.local/lib/python3.8/site-packages/cocotb/__init__.py", line 175, in _initialise_testbench
_setup_logging()
File "/home/philipp/.local/lib/python3.8/site-packages/cocotb/__init__.py", line 76, in _setup_logging
cocotb.log.default_config()
File "/home/philipp/.local/lib/python3.8/site-packages/cocotb/log.py", line 95, in default_config
log.setLevel(_default_log)
File "/usr/lib64/python3.8/logging/__init__.py", line 1409, in setLevel
self.level = _checkLevel(level)
File "/usr/lib64/python3.8/logging/__init__.py", line 197, in _checkLevel
raise TypeError("Level not an integer or a valid string: %r" % level)
TypeError: Level not an integer or a valid string: <function debug at 0x7f8653508430>
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cocotb/log.py`
Content:
```
1 # Copyright (c) 2013, 2018 Potential Ventures Ltd
2 # Copyright (c) 2013 SolarFlare Communications Inc
3 # All rights reserved.
4 #
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are met:
7 # * Redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer.
9 # * Redistributions in binary form must reproduce the above copyright
10 # notice, this list of conditions and the following disclaimer in the
11 # documentation and/or other materials provided with the distribution.
12 # * Neither the name of Potential Ventures Ltd,
13 # SolarFlare Communications Inc nor the
14 # names of its contributors may be used to endorse or promote products
15 # derived from this software without specific prior written permission.
16 #
17 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
21 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 """
29 Everything related to logging
30 """
31
32 import os
33 import sys
34 import logging
35 import warnings
36
37 from cocotb.utils import (
38 get_sim_time, get_time_from_sim_steps, want_color_output
39 )
40
41 import cocotb.ANSI as ANSI
42
43 if "COCOTB_REDUCED_LOG_FMT" in os.environ:
44 _suppress = True
45 else:
46 _suppress = False
47
48 # Column alignment
49 _LEVEL_CHARS = len("CRITICAL") # noqa
50 _RECORD_CHARS = 35 # noqa
51 _FILENAME_CHARS = 20 # noqa
52 _LINENO_CHARS = 4 # noqa
53 _FUNCNAME_CHARS = 31 # noqa
54
55
56 def default_config():
57 """ Apply the default cocotb log formatting to the root logger.
58
59 This hooks up the logger to write to stdout, using either
60 :class:`SimColourLogFormatter` or :class:`SimLogFormatter` depending
61 on whether colored output is requested. It also adds a
62 :class:`SimTimeContextFilter` filter so that
63 :attr:`~logging.LogRecord.created_sim_time` is available to the formatter.
64
65 The logging level for cocotb logs is set based on the
66 :envvar:`COCOTB_LOG_LEVEL` environment variable, which defaults to ``INFO``.
67
68 If desired, this logging configuration can be overwritten by calling
69 ``logging.basicConfig(..., force=True)`` (in Python 3.8 onwards), or by
70 manually resetting the root logger instance.
71 An example of this can be found in the section on :ref:`rotating-logger`.
72
73 .. versionadded:: 1.4
74 """
75 # construct an appropriate handler
76 hdlr = logging.StreamHandler(sys.stdout)
77 hdlr.addFilter(SimTimeContextFilter())
78 if want_color_output():
79 hdlr.setFormatter(SimColourLogFormatter())
80 else:
81 hdlr.setFormatter(SimLogFormatter())
82
83 logging.setLoggerClass(SimBaseLog) # For backwards compatibility
84 logging.basicConfig()
85 logging.getLogger().handlers = [hdlr] # overwrite default handlers
86
87 # apply level settings for cocotb
88 log = logging.getLogger('cocotb')
89 level = os.getenv("COCOTB_LOG_LEVEL", "INFO")
90 try:
91 _default_log = getattr(logging, level)
92 except AttributeError:
93 log.error("Unable to set logging level to %r" % level)
94 _default_log = logging.INFO
95 log.setLevel(_default_log)
96
97 # Notify GPI of log level, which it uses as an optimization to avoid
98 # calling into Python.
99 from cocotb import simulator
100 simulator.log_level(_default_log)
101
102
103 class SimBaseLog(logging.getLoggerClass()):
104 """ This class only exists for backwards compatibility """
105
106 @property
107 def logger(self):
108 warnings.warn(
109 "the .logger attribute should not be used now that `SimLog` "
110 "returns a native logger instance directly.",
111 DeprecationWarning, stacklevel=2)
112 return self
113
114 @property
115 def colour(self):
116 warnings.warn(
117 "the .colour attribute may be removed in future, use the "
118 "equivalent `cocotb.utils.want_color_output()` instead",
119 DeprecationWarning, stacklevel=2)
120 return want_color_output()
121
122
123 # this used to be a class, hence the unusual capitalization
124 def SimLog(name, ident=None):
125 """ Like logging.getLogger, but append a numeric identifier to the name """
126 if ident is not None:
127 name = "%s.0x%x" % (name, ident)
128 return logging.getLogger(name)
129
130
131 class SimTimeContextFilter(logging.Filter):
132 """
133 A filter to inject simulator times into the log records.
134
135 This uses the approach described in the :ref:`Python logging cookbook <python:filters-contextual>`.
136
137 This adds the :attr:`~logging.LogRecord.created_sim_time` attribute.
138
139 .. versionadded:: 1.4
140 """
141
142 # needed to make our docs render well
143 def __init__(self):
144 """"""
145 super().__init__()
146
147 def filter(self, record):
148 try:
149 record.created_sim_time = get_sim_time()
150 except RecursionError:
151 # get_sim_time may try to log - if that happens, we can't
152 # attach a simulator time to this message.
153 record.created_sim_time = None
154 return True
155
156
157 class SimLogFormatter(logging.Formatter):
158 """Log formatter to provide consistent log message handling.
159
160 This will only add simulator timestamps if the handler object this
161 formatter is attached to has a :class:`SimTimeContextFilter` filter
162 attached, which cocotb ensures by default.
163 """
164
165 # Removes the arguments from the base class. Docstring needed to make
166 # sphinx happy.
167 def __init__(self):
168 """ Takes no arguments. """
169 super().__init__()
170
171 # Justify and truncate
172 @staticmethod
173 def ljust(string, chars):
174 if len(string) > chars:
175 return ".." + string[(chars - 2) * -1:]
176 return string.ljust(chars)
177
178 @staticmethod
179 def rjust(string, chars):
180 if len(string) > chars:
181 return ".." + string[(chars - 2) * -1:]
182 return string.rjust(chars)
183
184 def _format(self, level, record, msg, coloured=False):
185 sim_time = getattr(record, 'created_sim_time', None)
186 if sim_time is None:
187 sim_time_str = " -.--ns"
188 else:
189 time_ns = get_time_from_sim_steps(sim_time, 'ns')
190 sim_time_str = "{:6.2f}ns".format(time_ns)
191 prefix = sim_time_str.rjust(11) + ' ' + level + ' '
192 if not _suppress:
193 prefix += self.ljust(record.name, _RECORD_CHARS) + \
194 self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS) + \
195 ':' + self.ljust(str(record.lineno), _LINENO_CHARS) + \
196 ' in ' + self.ljust(str(record.funcName), _FUNCNAME_CHARS) + ' '
197
198 # these lines are copied from the builtin logger
199 if record.exc_info:
200 # Cache the traceback text to avoid converting it multiple times
201 # (it's constant anyway)
202 if not record.exc_text:
203 record.exc_text = self.formatException(record.exc_info)
204 if record.exc_text:
205 if msg[-1:] != "\n":
206 msg = msg + "\n"
207 msg = msg + record.exc_text
208
209 prefix_len = len(prefix)
210 if coloured:
211 prefix_len -= (len(level) - _LEVEL_CHARS)
212 pad = "\n" + " " * (prefix_len)
213 return prefix + pad.join(msg.split('\n'))
214
215 def format(self, record):
216 """Prettify the log output, annotate with simulation time"""
217
218 msg = record.getMessage()
219 level = record.levelname.ljust(_LEVEL_CHARS)
220
221 return self._format(level, record, msg)
222
223
224 class SimColourLogFormatter(SimLogFormatter):
225 """Log formatter to provide consistent log message handling."""
226
227 loglevel2colour = {
228 logging.DEBUG : "%s",
229 logging.INFO : ANSI.COLOR_INFO + "%s" + ANSI.COLOR_DEFAULT,
230 logging.WARNING : ANSI.COLOR_WARNING + "%s" + ANSI.COLOR_DEFAULT,
231 logging.ERROR : ANSI.COLOR_ERROR + "%s" + ANSI.COLOR_DEFAULT,
232 logging.CRITICAL: ANSI.COLOR_CRITICAL + "%s" + ANSI.COLOR_DEFAULT,
233 }
234
235 def format(self, record):
236 """Prettify the log output, annotate with simulation time"""
237
238 msg = record.getMessage()
239
240 # Need to colour each line in case coloring is applied in the message
241 msg = '\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\n')])
242 level = (SimColourLogFormatter.loglevel2colour[record.levelno] %
243 record.levelname.ljust(_LEVEL_CHARS))
244
245 return self._format(level, record, msg, coloured=True)
246
247
248 def _filter_from_c(logger_name, level):
249 return logging.getLogger(logger_name).isEnabledFor(level)
250
251
252 def _log_from_c(logger_name, level, filename, lineno, msg, function_name):
253 """
254 This is for use from the C world, and allows us to insert C stack
255 information.
256 """
257 logger = logging.getLogger(logger_name)
258 if logger.isEnabledFor(level):
259 record = logger.makeRecord(
260 logger.name,
261 level,
262 filename,
263 lineno,
264 msg,
265 None,
266 None,
267 function_name
268 )
269 logger.handle(record)
270
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cocotb/log.py b/cocotb/log.py
--- a/cocotb/log.py
+++ b/cocotb/log.py
@@ -52,6 +52,9 @@
_LINENO_CHARS = 4 # noqa
_FUNCNAME_CHARS = 31 # noqa
+# Default log level if not overwritten by the user.
+_COCOTB_LOG_LEVEL_DEFAULT = "INFO"
+
def default_config():
""" Apply the default cocotb log formatting to the root logger.
@@ -86,18 +89,25 @@
# apply level settings for cocotb
log = logging.getLogger('cocotb')
- level = os.getenv("COCOTB_LOG_LEVEL", "INFO")
+
+ try:
+ # All log levels are upper case, convert the user input for convenience.
+ level = os.environ["COCOTB_LOG_LEVEL"].upper()
+ except KeyError:
+ level = _COCOTB_LOG_LEVEL_DEFAULT
+
try:
- _default_log = getattr(logging, level)
- except AttributeError:
- log.error("Unable to set logging level to %r" % level)
- _default_log = logging.INFO
- log.setLevel(_default_log)
+ log.setLevel(level)
+ except ValueError:
+ valid_levels = ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG')
+ raise ValueError("Invalid log level %r passed through the "
+ "COCOTB_LOG_LEVEL environment variable. Valid log "
+ "levels: %s" % (level, ', '.join(valid_levels)))
# Notify GPI of log level, which it uses as an optimization to avoid
# calling into Python.
from cocotb import simulator
- simulator.log_level(_default_log)
+ simulator.log_level(log.getEffectiveLevel())
class SimBaseLog(logging.getLoggerClass()):
|
{"golden_diff": "diff --git a/cocotb/log.py b/cocotb/log.py\n--- a/cocotb/log.py\n+++ b/cocotb/log.py\n@@ -52,6 +52,9 @@\n _LINENO_CHARS = 4 # noqa\n _FUNCNAME_CHARS = 31 # noqa\n \n+# Default log level if not overwritten by the user.\n+_COCOTB_LOG_LEVEL_DEFAULT = \"INFO\"\n+\n \n def default_config():\n \"\"\" Apply the default cocotb log formatting to the root logger.\n@@ -86,18 +89,25 @@\n \n # apply level settings for cocotb\n log = logging.getLogger('cocotb')\n- level = os.getenv(\"COCOTB_LOG_LEVEL\", \"INFO\")\n+\n+ try:\n+ # All log levels are upper case, convert the user input for convenience.\n+ level = os.environ[\"COCOTB_LOG_LEVEL\"].upper()\n+ except KeyError:\n+ level = _COCOTB_LOG_LEVEL_DEFAULT\n+\n try:\n- _default_log = getattr(logging, level)\n- except AttributeError:\n- log.error(\"Unable to set logging level to %r\" % level)\n- _default_log = logging.INFO\n- log.setLevel(_default_log)\n+ log.setLevel(level)\n+ except ValueError:\n+ valid_levels = ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG')\n+ raise ValueError(\"Invalid log level %r passed through the \"\n+ \"COCOTB_LOG_LEVEL environment variable. Valid log \"\n+ \"levels: %s\" % (level, ', '.join(valid_levels)))\n \n # Notify GPI of log level, which it uses as an optimization to avoid\n # calling into Python.\n from cocotb import simulator\n- simulator.log_level(_default_log)\n+ simulator.log_level(log.getEffectiveLevel())\n \n \n class SimBaseLog(logging.getLoggerClass()):\n", "issue": "Provide friendlier handling of invalid COCOTB_LOG_LEVEL\nWhen running with `COCOTB_LOG_LEVEL=debug` I get the shown stack trace.\r\n\r\nWe should be able to improve the user experience a bit by:\r\n- Converting values to ALLCAPS, i.e. debug becomes DEBUG\r\n- Provide a more helpful error message with allowed options instead of a stacktrace.\r\n\r\n\r\n```\r\n sim_build/fifo \r\n -.--ns INFO cocotb.gpi ..mbed/gpi_embed.cpp:74 in set_program_name_in_venv Did not detect Python virtual environment. 
Using system-wide Python interpreter\r\n -.--ns INFO cocotb.gpi ../gpi/GpiCommon.cpp:105 in gpi_print_registered_impl VPI registered\r\n -.--ns INFO cocotb.gpi ..mbed/gpi_embed.cpp:245 in embed_sim_init Python interpreter initialized and cocotb loaded!\r\nTraceback (most recent call last):\r\n File \"/home/philipp/.local/lib/python3.8/site-packages/cocotb/__init__.py\", line 175, in _initialise_testbench\r\n _setup_logging()\r\n File \"/home/philipp/.local/lib/python3.8/site-packages/cocotb/__init__.py\", line 76, in _setup_logging\r\n cocotb.log.default_config()\r\n File \"/home/philipp/.local/lib/python3.8/site-packages/cocotb/log.py\", line 95, in default_config\r\n log.setLevel(_default_log)\r\n File \"/usr/lib64/python3.8/logging/__init__.py\", line 1409, in setLevel\r\n self.level = _checkLevel(level)\r\n File \"/usr/lib64/python3.8/logging/__init__.py\", line 197, in _checkLevel\r\n raise TypeError(\"Level not an integer or a valid string: %r\" % level)\r\nTypeError: Level not an integer or a valid string: <function debug at 0x7f8653508430>\r\n```\n", "before_files": [{"content": "# Copyright (c) 2013, 2018 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nEverything related to logging\n\"\"\"\n\nimport os\nimport sys\nimport logging\nimport warnings\n\nfrom cocotb.utils import (\n get_sim_time, get_time_from_sim_steps, want_color_output\n)\n\nimport cocotb.ANSI as ANSI\n\nif \"COCOTB_REDUCED_LOG_FMT\" in os.environ:\n _suppress = True\nelse:\n _suppress = False\n\n# Column alignment\n_LEVEL_CHARS = len(\"CRITICAL\") # noqa\n_RECORD_CHARS = 35 # noqa\n_FILENAME_CHARS = 20 # noqa\n_LINENO_CHARS = 4 # noqa\n_FUNCNAME_CHARS = 31 # noqa\n\n\ndef default_config():\n \"\"\" Apply the default cocotb log formatting to the root logger.\n\n This hooks up the logger to write to stdout, using either\n :class:`SimColourLogFormatter` or :class:`SimLogFormatter` depending\n on whether colored output is requested. 
It also adds a\n :class:`SimTimeContextFilter` filter so that\n :attr:`~logging.LogRecord.created_sim_time` is available to the formatter.\n\n The logging level for cocotb logs is set based on the\n :envvar:`COCOTB_LOG_LEVEL` environment variable, which defaults to ``INFO``.\n\n If desired, this logging configuration can be overwritten by calling\n ``logging.basicConfig(..., force=True)`` (in Python 3.8 onwards), or by\n manually resetting the root logger instance.\n An example of this can be found in the section on :ref:`rotating-logger`.\n\n .. versionadded:: 1.4\n \"\"\"\n # construct an appropriate handler\n hdlr = logging.StreamHandler(sys.stdout)\n hdlr.addFilter(SimTimeContextFilter())\n if want_color_output():\n hdlr.setFormatter(SimColourLogFormatter())\n else:\n hdlr.setFormatter(SimLogFormatter())\n\n logging.setLoggerClass(SimBaseLog) # For backwards compatibility\n logging.basicConfig()\n logging.getLogger().handlers = [hdlr] # overwrite default handlers\n\n # apply level settings for cocotb\n log = logging.getLogger('cocotb')\n level = os.getenv(\"COCOTB_LOG_LEVEL\", \"INFO\")\n try:\n _default_log = getattr(logging, level)\n except AttributeError:\n log.error(\"Unable to set logging level to %r\" % level)\n _default_log = logging.INFO\n log.setLevel(_default_log)\n\n # Notify GPI of log level, which it uses as an optimization to avoid\n # calling into Python.\n from cocotb import simulator\n simulator.log_level(_default_log)\n\n\nclass SimBaseLog(logging.getLoggerClass()):\n \"\"\" This class only exists for backwards compatibility \"\"\"\n\n @property\n def logger(self):\n warnings.warn(\n \"the .logger attribute should not be used now that `SimLog` \"\n \"returns a native logger instance directly.\",\n DeprecationWarning, stacklevel=2)\n return self\n\n @property\n def colour(self):\n warnings.warn(\n \"the .colour attribute may be removed in future, use the \"\n \"equivalent `cocotb.utils.want_color_output()` instead\",\n DeprecationWarning, stacklevel=2)\n return want_color_output()\n\n\n# this used to be a class, hence the unusual capitalization\ndef SimLog(name, ident=None):\n \"\"\" Like logging.getLogger, but append a numeric identifier to the name \"\"\"\n if ident is not None:\n name = \"%s.0x%x\" % (name, ident)\n return logging.getLogger(name)\n\n\nclass SimTimeContextFilter(logging.Filter):\n \"\"\"\n A filter to inject simulator times into the log records.\n\n This uses the approach described in the :ref:`Python logging cookbook <python:filters-contextual>`.\n\n This adds the :attr:`~logging.LogRecord.created_sim_time` attribute.\n\n .. versionadded:: 1.4\n \"\"\"\n\n # needed to make our docs render well\n def __init__(self):\n \"\"\"\"\"\"\n super().__init__()\n\n def filter(self, record):\n try:\n record.created_sim_time = get_sim_time()\n except RecursionError:\n # get_sim_time may try to log - if that happens, we can't\n # attach a simulator time to this message.\n record.created_sim_time = None\n return True\n\n\nclass SimLogFormatter(logging.Formatter):\n \"\"\"Log formatter to provide consistent log message handling.\n\n This will only add simulator timestamps if the handler object this\n formatter is attached to has a :class:`SimTimeContextFilter` filter\n attached, which cocotb ensures by default.\n \"\"\"\n\n # Removes the arguments from the base class. Docstring needed to make\n # sphinx happy.\n def __init__(self):\n \"\"\" Takes no arguments. 
\"\"\"\n super().__init__()\n\n # Justify and truncate\n @staticmethod\n def ljust(string, chars):\n if len(string) > chars:\n return \"..\" + string[(chars - 2) * -1:]\n return string.ljust(chars)\n\n @staticmethod\n def rjust(string, chars):\n if len(string) > chars:\n return \"..\" + string[(chars - 2) * -1:]\n return string.rjust(chars)\n\n def _format(self, level, record, msg, coloured=False):\n sim_time = getattr(record, 'created_sim_time', None)\n if sim_time is None:\n sim_time_str = \" -.--ns\"\n else:\n time_ns = get_time_from_sim_steps(sim_time, 'ns')\n sim_time_str = \"{:6.2f}ns\".format(time_ns)\n prefix = sim_time_str.rjust(11) + ' ' + level + ' '\n if not _suppress:\n prefix += self.ljust(record.name, _RECORD_CHARS) + \\\n self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS) + \\\n ':' + self.ljust(str(record.lineno), _LINENO_CHARS) + \\\n ' in ' + self.ljust(str(record.funcName), _FUNCNAME_CHARS) + ' '\n\n # these lines are copied from the builtin logger\n if record.exc_info:\n # Cache the traceback text to avoid converting it multiple times\n # (it's constant anyway)\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n if msg[-1:] != \"\\n\":\n msg = msg + \"\\n\"\n msg = msg + record.exc_text\n\n prefix_len = len(prefix)\n if coloured:\n prefix_len -= (len(level) - _LEVEL_CHARS)\n pad = \"\\n\" + \" \" * (prefix_len)\n return prefix + pad.join(msg.split('\\n'))\n\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n\n msg = record.getMessage()\n level = record.levelname.ljust(_LEVEL_CHARS)\n\n return self._format(level, record, msg)\n\n\nclass SimColourLogFormatter(SimLogFormatter):\n \"\"\"Log formatter to provide consistent log message handling.\"\"\"\n\n loglevel2colour = {\n logging.DEBUG : \"%s\",\n logging.INFO : ANSI.COLOR_INFO + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.WARNING : ANSI.COLOR_WARNING + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.ERROR : ANSI.COLOR_ERROR + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.CRITICAL: ANSI.COLOR_CRITICAL + \"%s\" + ANSI.COLOR_DEFAULT,\n }\n\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n\n msg = record.getMessage()\n\n # Need to colour each line in case coloring is applied in the message\n msg = '\\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\\n')])\n level = (SimColourLogFormatter.loglevel2colour[record.levelno] %\n record.levelname.ljust(_LEVEL_CHARS))\n\n return self._format(level, record, msg, coloured=True)\n\n\ndef _filter_from_c(logger_name, level):\n return logging.getLogger(logger_name).isEnabledFor(level)\n\n\ndef _log_from_c(logger_name, level, filename, lineno, msg, function_name):\n \"\"\"\n This is for use from the C world, and allows us to insert C stack\n information.\n \"\"\"\n logger = logging.getLogger(logger_name)\n if logger.isEnabledFor(level):\n record = logger.makeRecord(\n logger.name,\n level,\n filename,\n lineno,\n msg,\n None,\n None,\n function_name\n )\n logger.handle(record)\n", "path": "cocotb/log.py"}], "after_files": [{"content": "# Copyright (c) 2013, 2018 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of 
conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nEverything related to logging\n\"\"\"\n\nimport os\nimport sys\nimport logging\nimport warnings\n\nfrom cocotb.utils import (\n get_sim_time, get_time_from_sim_steps, want_color_output\n)\n\nimport cocotb.ANSI as ANSI\n\nif \"COCOTB_REDUCED_LOG_FMT\" in os.environ:\n _suppress = True\nelse:\n _suppress = False\n\n# Column alignment\n_LEVEL_CHARS = len(\"CRITICAL\") # noqa\n_RECORD_CHARS = 35 # noqa\n_FILENAME_CHARS = 20 # noqa\n_LINENO_CHARS = 4 # noqa\n_FUNCNAME_CHARS = 31 # noqa\n\n# Default log level if not overwritten by the user.\n_COCOTB_LOG_LEVEL_DEFAULT = \"INFO\"\n\n\ndef default_config():\n \"\"\" Apply the default cocotb log formatting to the root logger.\n\n This hooks up the logger to write to stdout, using either\n :class:`SimColourLogFormatter` or :class:`SimLogFormatter` depending\n on whether colored output is requested. It also adds a\n :class:`SimTimeContextFilter` filter so that\n :attr:`~logging.LogRecord.created_sim_time` is available to the formatter.\n\n The logging level for cocotb logs is set based on the\n :envvar:`COCOTB_LOG_LEVEL` environment variable, which defaults to ``INFO``.\n\n If desired, this logging configuration can be overwritten by calling\n ``logging.basicConfig(..., force=True)`` (in Python 3.8 onwards), or by\n manually resetting the root logger instance.\n An example of this can be found in the section on :ref:`rotating-logger`.\n\n .. 
versionadded:: 1.4\n \"\"\"\n # construct an appropriate handler\n hdlr = logging.StreamHandler(sys.stdout)\n hdlr.addFilter(SimTimeContextFilter())\n if want_color_output():\n hdlr.setFormatter(SimColourLogFormatter())\n else:\n hdlr.setFormatter(SimLogFormatter())\n\n logging.setLoggerClass(SimBaseLog) # For backwards compatibility\n logging.basicConfig()\n logging.getLogger().handlers = [hdlr] # overwrite default handlers\n\n # apply level settings for cocotb\n log = logging.getLogger('cocotb')\n\n try:\n # All log levels are upper case, convert the user input for convenience.\n level = os.environ[\"COCOTB_LOG_LEVEL\"].upper()\n except KeyError:\n level = _COCOTB_LOG_LEVEL_DEFAULT\n\n try:\n log.setLevel(level)\n except ValueError:\n valid_levels = ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG')\n raise ValueError(\"Invalid log level %r passed through the \"\n \"COCOTB_LOG_LEVEL environment variable. Valid log \"\n \"levels: %s\" % (level, ', '.join(valid_levels)))\n\n # Notify GPI of log level, which it uses as an optimization to avoid\n # calling into Python.\n from cocotb import simulator\n simulator.log_level(log.getEffectiveLevel())\n\n\nclass SimBaseLog(logging.getLoggerClass()):\n \"\"\" This class only exists for backwards compatibility \"\"\"\n\n @property\n def logger(self):\n warnings.warn(\n \"the .logger attribute should not be used now that `SimLog` \"\n \"returns a native logger instance directly.\",\n DeprecationWarning, stacklevel=2)\n return self\n\n @property\n def colour(self):\n warnings.warn(\n \"the .colour attribute may be removed in future, use the \"\n \"equivalent `cocotb.utils.want_color_output()` instead\",\n DeprecationWarning, stacklevel=2)\n return want_color_output()\n\n\n# this used to be a class, hence the unusual capitalization\ndef SimLog(name, ident=None):\n \"\"\" Like logging.getLogger, but append a numeric identifier to the name \"\"\"\n if ident is not None:\n name = \"%s.0x%x\" % (name, ident)\n return logging.getLogger(name)\n\n\nclass SimTimeContextFilter(logging.Filter):\n \"\"\"\n A filter to inject simulator times into the log records.\n\n This uses the approach described in the :ref:`Python logging cookbook <python:filters-contextual>`.\n\n This adds the :attr:`~logging.LogRecord.created_sim_time` attribute.\n\n .. versionadded:: 1.4\n \"\"\"\n\n # needed to make our docs render well\n def __init__(self):\n \"\"\"\"\"\"\n super().__init__()\n\n def filter(self, record):\n try:\n record.created_sim_time = get_sim_time()\n except RecursionError:\n # get_sim_time may try to log - if that happens, we can't\n # attach a simulator time to this message.\n record.created_sim_time = None\n return True\n\n\nclass SimLogFormatter(logging.Formatter):\n \"\"\"Log formatter to provide consistent log message handling.\n\n This will only add simulator timestamps if the handler object this\n formatter is attached to has a :class:`SimTimeContextFilter` filter\n attached, which cocotb ensures by default.\n \"\"\"\n\n # Removes the arguments from the base class. Docstring needed to make\n # sphinx happy.\n def __init__(self):\n \"\"\" Takes no arguments. 
\"\"\"\n super().__init__()\n\n # Justify and truncate\n @staticmethod\n def ljust(string, chars):\n if len(string) > chars:\n return \"..\" + string[(chars - 2) * -1:]\n return string.ljust(chars)\n\n @staticmethod\n def rjust(string, chars):\n if len(string) > chars:\n return \"..\" + string[(chars - 2) * -1:]\n return string.rjust(chars)\n\n def _format(self, level, record, msg, coloured=False):\n sim_time = getattr(record, 'created_sim_time', None)\n if sim_time is None:\n sim_time_str = \" -.--ns\"\n else:\n time_ns = get_time_from_sim_steps(sim_time, 'ns')\n sim_time_str = \"{:6.2f}ns\".format(time_ns)\n prefix = sim_time_str.rjust(11) + ' ' + level + ' '\n if not _suppress:\n prefix += self.ljust(record.name, _RECORD_CHARS) + \\\n self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS) + \\\n ':' + self.ljust(str(record.lineno), _LINENO_CHARS) + \\\n ' in ' + self.ljust(str(record.funcName), _FUNCNAME_CHARS) + ' '\n\n # these lines are copied from the builtin logger\n if record.exc_info:\n # Cache the traceback text to avoid converting it multiple times\n # (it's constant anyway)\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n if msg[-1:] != \"\\n\":\n msg = msg + \"\\n\"\n msg = msg + record.exc_text\n\n prefix_len = len(prefix)\n if coloured:\n prefix_len -= (len(level) - _LEVEL_CHARS)\n pad = \"\\n\" + \" \" * (prefix_len)\n return prefix + pad.join(msg.split('\\n'))\n\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n\n msg = record.getMessage()\n level = record.levelname.ljust(_LEVEL_CHARS)\n\n return self._format(level, record, msg)\n\n\nclass SimColourLogFormatter(SimLogFormatter):\n \"\"\"Log formatter to provide consistent log message handling.\"\"\"\n\n loglevel2colour = {\n logging.DEBUG : \"%s\",\n logging.INFO : ANSI.COLOR_INFO + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.WARNING : ANSI.COLOR_WARNING + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.ERROR : ANSI.COLOR_ERROR + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.CRITICAL: ANSI.COLOR_CRITICAL + \"%s\" + ANSI.COLOR_DEFAULT,\n }\n\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n\n msg = record.getMessage()\n\n # Need to colour each line in case coloring is applied in the message\n msg = '\\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\\n')])\n level = (SimColourLogFormatter.loglevel2colour[record.levelno] %\n record.levelname.ljust(_LEVEL_CHARS))\n\n return self._format(level, record, msg, coloured=True)\n\n\ndef _filter_from_c(logger_name, level):\n return logging.getLogger(logger_name).isEnabledFor(level)\n\n\ndef _log_from_c(logger_name, level, filename, lineno, msg, function_name):\n \"\"\"\n This is for use from the C world, and allows us to insert C stack\n information.\n \"\"\"\n logger = logging.getLogger(logger_name)\n if logger.isEnabledFor(level):\n record = logger.makeRecord(\n logger.name,\n level,\n filename,\n lineno,\n msg,\n None,\n None,\n function_name\n )\n logger.handle(record)\n", "path": "cocotb/log.py"}]}
| 3,662 | 417 |
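
A small, self-contained sketch of the log-level handling that the cocotb row above introduces. The `COCOTB_LOG_LEVEL` variable, the `INFO` default and the error message are taken from the golden diff; the `resolve_log_level()` wrapper and the `__main__` demo are assumptions made only for this illustration.

```python
# Stand-alone sketch of the patched COCOTB_LOG_LEVEL handling.
import logging
import os

_DEFAULT_LEVEL = "INFO"

def resolve_log_level():
    # Upper-case the user input so "debug" is accepted as "DEBUG".
    level = os.environ.get("COCOTB_LOG_LEVEL", _DEFAULT_LEVEL).upper()
    log = logging.getLogger("cocotb")
    try:
        log.setLevel(level)
    except ValueError:
        valid_levels = ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG')
        raise ValueError("Invalid log level %r passed through the "
                         "COCOTB_LOG_LEVEL environment variable. Valid log "
                         "levels: %s" % (level, ', '.join(valid_levels)))
    return log.getEffectiveLevel()

if __name__ == "__main__":
    os.environ["COCOTB_LOG_LEVEL"] = "debug"   # lower-case value from the issue
    print(resolve_log_level())                 # 10 == logging.DEBUG
```
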
gh_patches_debug_6239
|
rasdani/github-patches
|
git_diff
|
searx__searx-1800
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[flickr_noapi] UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 1: ordinal not in range(128)
Similar to #419
Installation: current master commit
How to reproduce? Search for "kek" on https://search.snopyta.org/ and click on "Images"
```
ERROR:flask.app:Exception on / [POST]
Traceback (most recent call last):
File "/usr/local/searx/searx-ve/local/lib/python2.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/searx/searx-ve/local/lib/python2.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/searx/searx-ve/local/lib/python2.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/searx/searx-ve/local/lib/python2.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/searx/searx-ve/local/lib/python2.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/searx/searx/webapp.py", line 544, in index
result['title'] = highlight_content(escape(result['title'] or u''), search_query.query)
File "/usr/local/searx/searx/utils.py", line 79, in highlight_content
if content.lower().find(query.lower()) > -1:
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 1: ordinal not in range(128)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/flickr_noapi.py`
Content:
```
1 #!/usr/bin/env python
2
3 """
4 Flickr (Images)
5
6 @website https://www.flickr.com
7 @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
8
9 @using-api no
10 @results HTML
11 @stable no
12 @parse url, title, thumbnail, img_src
13 """
14
15 from json import loads
16 from time import time
17 import re
18 from searx.engines import logger
19 from searx.url_utils import urlencode
20 from searx.utils import ecma_unescape, html_to_text
21
22 logger = logger.getChild('flickr-noapi')
23
24 categories = ['images']
25
26 url = 'https://www.flickr.com/'
27 search_url = url + 'search?{query}&page={page}'
28 time_range_url = '&min_upload_date={start}&max_upload_date={end}'
29 photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'
30 modelexport_re = re.compile(r"^\s*modelExport:\s*({.*}),$", re.M)
31 image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')
32
33 paging = True
34 time_range_support = True
35 time_range_dict = {'day': 60 * 60 * 24,
36 'week': 60 * 60 * 24 * 7,
37 'month': 60 * 60 * 24 * 7 * 4,
38 'year': 60 * 60 * 24 * 7 * 52}
39
40
41 def build_flickr_url(user_id, photo_id):
42 return photo_url.format(userid=user_id, photoid=photo_id)
43
44
45 def _get_time_range_url(time_range):
46 if time_range in time_range_dict:
47 return time_range_url.format(start=time(), end=str(int(time()) - time_range_dict[time_range]))
48 return ''
49
50
51 def request(query, params):
52 params['url'] = (search_url.format(query=urlencode({'text': query}), page=params['pageno'])
53 + _get_time_range_url(params['time_range']))
54 return params
55
56
57 def response(resp):
58 results = []
59
60 matches = modelexport_re.search(resp.text)
61
62 if matches is None:
63 return results
64
65 match = matches.group(1)
66 model_export = loads(match)
67
68 if 'legend' not in model_export:
69 return results
70
71 legend = model_export['legend']
72
73 # handle empty page
74 if not legend or not legend[0]:
75 return results
76
77 for index in legend:
78 photo = model_export['main'][index[0]][int(index[1])][index[2]][index[3]][int(index[4])]
79 author = ecma_unescape(photo.get('realname', ''))
80 source = ecma_unescape(photo.get('username', '')) + ' @ Flickr'
81 title = ecma_unescape(photo.get('title', ''))
82 content = html_to_text(ecma_unescape(photo.get('description', '')))
83 img_src = None
84 # From the biggest to the lowest format
85 for image_size in image_sizes:
86 if image_size in photo['sizes']:
87 img_src = photo['sizes'][image_size]['url']
88 img_format = 'jpg ' \
89 + str(photo['sizes'][image_size]['width']) \
90 + 'x' \
91 + str(photo['sizes'][image_size]['height'])
92 break
93
94 if not img_src:
95 logger.debug('cannot find valid image size: {0}'.format(repr(photo)))
96 continue
97
98 # For a bigger thumbnail, keep only the url_z, not the url_n
99 if 'n' in photo['sizes']:
100 thumbnail_src = photo['sizes']['n']['url']
101 elif 'z' in photo['sizes']:
102 thumbnail_src = photo['sizes']['z']['url']
103 else:
104 thumbnail_src = img_src
105
106 if 'ownerNsid' not in photo:
107 # should not happen, disowned photo? Show it anyway
108 url = img_src
109 else:
110 url = build_flickr_url(photo['ownerNsid'], photo['id'])
111
112 result = {
113 'url': url,
114 'img_src': img_src,
115 'thumbnail_src': thumbnail_src,
116 'source': source,
117 'img_format': img_format,
118 'template': 'images.html'
119 }
120 try:
121 result['author'] = author.encode('utf-8')
122 result['title'] = title.encode('utf-8')
123 result['content'] = content.encode('utf-8')
124 except:
125 result['author'] = ''
126 result['title'] = ''
127 result['content'] = ''
128 results.append(result)
129
130 return results
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/searx/engines/flickr_noapi.py b/searx/engines/flickr_noapi.py
--- a/searx/engines/flickr_noapi.py
+++ b/searx/engines/flickr_noapi.py
@@ -118,9 +118,9 @@
'template': 'images.html'
}
try:
- result['author'] = author.encode('utf-8')
- result['title'] = title.encode('utf-8')
- result['content'] = content.encode('utf-8')
+ result['author'] = author
+ result['title'] = title
+ result['content'] = content
except:
result['author'] = ''
result['title'] = ''
|
{"golden_diff": "diff --git a/searx/engines/flickr_noapi.py b/searx/engines/flickr_noapi.py\n--- a/searx/engines/flickr_noapi.py\n+++ b/searx/engines/flickr_noapi.py\n@@ -118,9 +118,9 @@\n 'template': 'images.html'\n }\n try:\n- result['author'] = author.encode('utf-8')\n- result['title'] = title.encode('utf-8')\n- result['content'] = content.encode('utf-8')\n+ result['author'] = author\n+ result['title'] = title\n+ result['content'] = content\n except:\n result['author'] = ''\n result['title'] = ''\n", "issue": "[flickr_noapi] UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 1: ordinal not in range(128)\nSimilar to #419\r\n\r\nInstallation: current master commit\r\nHow to reproduce? Search for \"kek\" on https://search.snopyta.org/ and click on \"Images\"\r\n\r\n```\r\nERROR:flask.app:Exception on / [POST]\r\nTraceback (most recent call last):\r\n File \"/usr/local/searx/searx-ve/local/lib/python2.7/site-packages/flask/app.py\", line 2292, in wsgi_app\r\n response = self.full_dispatch_request()\r\n File \"/usr/local/searx/searx-ve/local/lib/python2.7/site-packages/flask/app.py\", line 1815, in full_dispatch_request\r\n rv = self.handle_user_exception(e)\r\n File \"/usr/local/searx/searx-ve/local/lib/python2.7/site-packages/flask/app.py\", line 1718, in handle_user_exception\r\n reraise(exc_type, exc_value, tb)\r\n File \"/usr/local/searx/searx-ve/local/lib/python2.7/site-packages/flask/app.py\", line 1813, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"/usr/local/searx/searx-ve/local/lib/python2.7/site-packages/flask/app.py\", line 1799, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"/usr/local/searx/searx/webapp.py\", line 544, in index\r\n result['title'] = highlight_content(escape(result['title'] or u''), search_query.query)\r\n File \"/usr/local/searx/searx/utils.py\", line 79, in highlight_content\r\n if content.lower().find(query.lower()) > -1:\r\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 1: ordinal not in range(128)\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\n Flickr (Images)\n\n @website https://www.flickr.com\n @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)\n\n @using-api no\n @results HTML\n @stable no\n @parse url, title, thumbnail, img_src\n\"\"\"\n\nfrom json import loads\nfrom time import time\nimport re\nfrom searx.engines import logger\nfrom searx.url_utils import urlencode\nfrom searx.utils import ecma_unescape, html_to_text\n\nlogger = logger.getChild('flickr-noapi')\n\ncategories = ['images']\n\nurl = 'https://www.flickr.com/'\nsearch_url = url + 'search?{query}&page={page}'\ntime_range_url = '&min_upload_date={start}&max_upload_date={end}'\nphoto_url = 'https://www.flickr.com/photos/{userid}/{photoid}'\nmodelexport_re = re.compile(r\"^\\s*modelExport:\\s*({.*}),$\", re.M)\nimage_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')\n\npaging = True\ntime_range_support = True\ntime_range_dict = {'day': 60 * 60 * 24,\n 'week': 60 * 60 * 24 * 7,\n 'month': 60 * 60 * 24 * 7 * 4,\n 'year': 60 * 60 * 24 * 7 * 52}\n\n\ndef build_flickr_url(user_id, photo_id):\n return photo_url.format(userid=user_id, photoid=photo_id)\n\n\ndef _get_time_range_url(time_range):\n if time_range in time_range_dict:\n return time_range_url.format(start=time(), end=str(int(time()) - time_range_dict[time_range]))\n return ''\n\n\ndef request(query, params):\n params['url'] = 
(search_url.format(query=urlencode({'text': query}), page=params['pageno'])\n + _get_time_range_url(params['time_range']))\n return params\n\n\ndef response(resp):\n results = []\n\n matches = modelexport_re.search(resp.text)\n\n if matches is None:\n return results\n\n match = matches.group(1)\n model_export = loads(match)\n\n if 'legend' not in model_export:\n return results\n\n legend = model_export['legend']\n\n # handle empty page\n if not legend or not legend[0]:\n return results\n\n for index in legend:\n photo = model_export['main'][index[0]][int(index[1])][index[2]][index[3]][int(index[4])]\n author = ecma_unescape(photo.get('realname', ''))\n source = ecma_unescape(photo.get('username', '')) + ' @ Flickr'\n title = ecma_unescape(photo.get('title', ''))\n content = html_to_text(ecma_unescape(photo.get('description', '')))\n img_src = None\n # From the biggest to the lowest format\n for image_size in image_sizes:\n if image_size in photo['sizes']:\n img_src = photo['sizes'][image_size]['url']\n img_format = 'jpg ' \\\n + str(photo['sizes'][image_size]['width']) \\\n + 'x' \\\n + str(photo['sizes'][image_size]['height'])\n break\n\n if not img_src:\n logger.debug('cannot find valid image size: {0}'.format(repr(photo)))\n continue\n\n # For a bigger thumbnail, keep only the url_z, not the url_n\n if 'n' in photo['sizes']:\n thumbnail_src = photo['sizes']['n']['url']\n elif 'z' in photo['sizes']:\n thumbnail_src = photo['sizes']['z']['url']\n else:\n thumbnail_src = img_src\n\n if 'ownerNsid' not in photo:\n # should not happen, disowned photo? Show it anyway\n url = img_src\n else:\n url = build_flickr_url(photo['ownerNsid'], photo['id'])\n\n result = {\n 'url': url,\n 'img_src': img_src,\n 'thumbnail_src': thumbnail_src,\n 'source': source,\n 'img_format': img_format,\n 'template': 'images.html'\n }\n try:\n result['author'] = author.encode('utf-8')\n result['title'] = title.encode('utf-8')\n result['content'] = content.encode('utf-8')\n except:\n result['author'] = ''\n result['title'] = ''\n result['content'] = ''\n results.append(result)\n\n return results\n", "path": "searx/engines/flickr_noapi.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\n Flickr (Images)\n\n @website https://www.flickr.com\n @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)\n\n @using-api no\n @results HTML\n @stable no\n @parse url, title, thumbnail, img_src\n\"\"\"\n\nfrom json import loads\nfrom time import time\nimport re\nfrom searx.engines import logger\nfrom searx.url_utils import urlencode\nfrom searx.utils import ecma_unescape, html_to_text\n\nlogger = logger.getChild('flickr-noapi')\n\ncategories = ['images']\n\nurl = 'https://www.flickr.com/'\nsearch_url = url + 'search?{query}&page={page}'\ntime_range_url = '&min_upload_date={start}&max_upload_date={end}'\nphoto_url = 'https://www.flickr.com/photos/{userid}/{photoid}'\nmodelexport_re = re.compile(r\"^\\s*modelExport:\\s*({.*}),$\", re.M)\nimage_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')\n\npaging = True\ntime_range_support = True\ntime_range_dict = {'day': 60 * 60 * 24,\n 'week': 60 * 60 * 24 * 7,\n 'month': 60 * 60 * 24 * 7 * 4,\n 'year': 60 * 60 * 24 * 7 * 52}\n\n\ndef build_flickr_url(user_id, photo_id):\n return photo_url.format(userid=user_id, photoid=photo_id)\n\n\ndef _get_time_range_url(time_range):\n if time_range in time_range_dict:\n return time_range_url.format(start=time(), end=str(int(time()) - time_range_dict[time_range]))\n return ''\n\n\ndef 
request(query, params):\n params['url'] = (search_url.format(query=urlencode({'text': query}), page=params['pageno'])\n + _get_time_range_url(params['time_range']))\n return params\n\n\ndef response(resp):\n results = []\n\n matches = modelexport_re.search(resp.text)\n\n if matches is None:\n return results\n\n match = matches.group(1)\n model_export = loads(match)\n\n if 'legend' not in model_export:\n return results\n\n legend = model_export['legend']\n\n # handle empty page\n if not legend or not legend[0]:\n return results\n\n for index in legend:\n photo = model_export['main'][index[0]][int(index[1])][index[2]][index[3]][int(index[4])]\n author = ecma_unescape(photo.get('realname', ''))\n source = ecma_unescape(photo.get('username', '')) + ' @ Flickr'\n title = ecma_unescape(photo.get('title', ''))\n content = html_to_text(ecma_unescape(photo.get('description', '')))\n img_src = None\n # From the biggest to the lowest format\n for image_size in image_sizes:\n if image_size in photo['sizes']:\n img_src = photo['sizes'][image_size]['url']\n img_format = 'jpg ' \\\n + str(photo['sizes'][image_size]['width']) \\\n + 'x' \\\n + str(photo['sizes'][image_size]['height'])\n break\n\n if not img_src:\n logger.debug('cannot find valid image size: {0}'.format(repr(photo)))\n continue\n\n # For a bigger thumbnail, keep only the url_z, not the url_n\n if 'n' in photo['sizes']:\n thumbnail_src = photo['sizes']['n']['url']\n elif 'z' in photo['sizes']:\n thumbnail_src = photo['sizes']['z']['url']\n else:\n thumbnail_src = img_src\n\n if 'ownerNsid' not in photo:\n # should not happen, disowned photo? Show it anyway\n url = img_src\n else:\n url = build_flickr_url(photo['ownerNsid'], photo['id'])\n\n result = {\n 'url': url,\n 'img_src': img_src,\n 'thumbnail_src': thumbnail_src,\n 'source': source,\n 'img_format': img_format,\n 'template': 'images.html'\n }\n try:\n result['author'] = author\n result['title'] = title\n result['content'] = content\n except:\n result['author'] = ''\n result['title'] = ''\n result['content'] = ''\n results.append(result)\n\n return results\n", "path": "searx/engines/flickr_noapi.py"}]}
| 2,039 | 169 |
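
A tiny illustration of why the searx row above drops the `.encode('utf-8')` calls: keeping result fields as text lets later helpers such as `highlight_content()` compare them against the unicode query without the implicit ASCII decode that raised the `UnicodeDecodeError`. `build_result()` and `highlight()` are stand-ins invented here, not searx APIs, and the sample data is made up.

```python
# -*- coding: utf-8 -*-
# Keep engine results as text; do not encode them to bytes inside the engine.

def build_result(title, author, content):
    # The patched engine stores the already-unescaped text values unchanged.
    return {'title': title, 'author': author, 'content': content}

def highlight(content, query):
    # Same kind of comparison that failed in searx/utils.py when `content`
    # was a UTF-8 byte string and `query` was unicode text.
    return content.lower().find(query.lower()) > -1

result = build_result(u'Hélas', u'Someone', u'non-ascii content')
print(highlight(result['title'], u'hélas'))    # True
```
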
gh_patches_debug_35248
|
rasdani/github-patches
|
git_diff
|
internetarchive__openlibrary-4049
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Books with non-ascii titles erroring when clicked from search
### Evidence / Screenshot (if possible)
### Relevant url?
<!-- `https://openlibrary.org/...` -->
### Steps to Reproduce
<!-- What steps caused you to find the bug? -->
1. Go to https://openlibrary.org/search?q=h%C3%A9las&mode=everything
2. Click on any book with a non-ascii character in the title
<!-- What actually happened after these steps? What did you expect to happen? -->
* Actual: 500 internal error on e.g. https://openlibrary.org/works/OL11565520W?edition=
* Expected: See the book
### Details
- **Logged in (Y/N)?** Y
- **Browser type/version?** FF82
- **Operating system?** Win10
- **Environment (prod/dev/local)?** prod
<!-- If not sure, put prod -->
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
### Related files
<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->
### Stakeholders
@cclauss
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openlibrary/core/processors/readableurls.py`
Content:
```
1 """Various web.py application processors used in OL.
2 """
3 import os
4 import web
5
6 from infogami.utils.view import render
7 from openlibrary.core import helpers as h
8
9 from six.moves import urllib
10
11
12 try:
13 from booklending_utils.openlibrary import is_exclusion
14 except ImportError:
15 def is_exclusion(obj):
16 """Processor for determining whether records require exclusion"""
17 return False
18
19 class ReadableUrlProcessor:
20 """Open Library code works with urls like /books/OL1M and
21 /books/OL1M/edit. This processor seamlessly changes the urls to
22 /books/OL1M/title and /books/OL1M/title/edit.
23
24 The changequery function is also customized to support this.
25 """
26 patterns = [
27 (r'/\w+/OL\d+M', '/type/edition', 'title', 'untitled'),
28 (r'/\w+/ia:[a-zA-Z0-9_\.-]+', '/type/edition', 'title', 'untitled'),
29 (r'/\w+/OL\d+A', '/type/author', 'name', 'noname'),
30 (r'/\w+/OL\d+W', '/type/work', 'title', 'untitled'),
31 (r'/[/\w]+/OL\d+L', '/type/list', 'name', 'unnamed')
32 ]
33
34 def __call__(self, handler):
35 # temp hack to handle languages and users during upstream-to-www migration
36 if web.ctx.path.startswith("/l/"):
37 raise web.seeother("/languages/" + web.ctx.path[len("/l/"):])
38
39 if web.ctx.path.startswith("/user/"):
40 if not web.ctx.site.get(web.ctx.path):
41 raise web.seeother("/people/" + web.ctx.path[len("/user/"):])
42
43 real_path, readable_path = get_readable_path(web.ctx.site, web.ctx.path, self.patterns, encoding=web.ctx.encoding)
44
45 #@@ web.ctx.path is either quoted or unquoted depends on whether the application is running
46 #@@ using builtin-server or lighttpd. That is probably a bug in web.py.
47 #@@ take care of that case here till that is fixed.
48 # @@ Also, the redirection must be done only for GET requests.
49 if readable_path != web.ctx.path and readable_path != urllib.parse.quote(web.safestr(web.ctx.path)) and web.ctx.method == "GET":
50 raise web.redirect(web.safeunicode(readable_path) + web.safeunicode(web.ctx.query))
51
52 web.ctx.readable_path = readable_path
53 web.ctx.path = real_path
54 web.ctx.fullpath = web.ctx.path + web.ctx.query
55 out = handler()
56 V2_TYPES = ['works', 'books', 'people', 'authors',
57 'publishers', 'languages', 'account']
58 if out and any(web.ctx.path.startswith('/%s/' % _type) for _type in V2_TYPES):
59 out.v2 = True
60
61 # Exclude noindex items
62 if web.ctx.get('exclude'):
63 web.ctx.status = "404 Not Found"
64 return render.notfound(web.ctx.path)
65
66 return out
67
68
69 def _get_object(site, key):
70 """Returns the object with the given key.
71
72 If the key has an OLID and no object is found with that key, it tries to
73 find object with the same OLID. OL database makes sures that OLIDs are
74 unique.
75 """
76 obj = site.get(key)
77
78 if obj is None and key.startswith("/a/"):
79 key = "/authors/" + key[len("/a/"):]
80 obj = key and site.get(key)
81
82 if obj is None and key.startswith("/b/"):
83 key = "/books/" + key[len("/b/"):]
84 obj = key and site.get(key)
85
86 if obj is None and key.startswith("/user/"):
87 key = "/people/" + key[len("/user/"):]
88 obj = key and site.get(key)
89
90 basename = key.split("/")[-1]
91
92 # redirect all /.*/ia:foo to /books/ia:foo
93 if obj is None and basename.startswith("ia:"):
94 key = "/books/" + basename
95 obj = site.get(key)
96
97 # redirect all /.*/OL123W to /works/OL123W
98 if obj is None and basename.startswith("OL") and basename.endswith("W"):
99 key = "/works/" + basename
100 obj = site.get(key)
101
102 # redirect all /.*/OL123M to /books/OL123M
103 if obj is None and basename.startswith("OL") and basename.endswith("M"):
104 key = "/books/" + basename
105 obj = site.get(key)
106
107 # redirect all /.*/OL123A to /authors/OL123A
108 if obj is None and basename.startswith("OL") and basename.endswith("A"):
109 key = "/authors/" + basename
110 obj = site.get(key)
111
112 # Disabled temporarily as the index is not ready the db
113
114 #if obj is None and web.re_compile(r"/.*/OL\d+[A-Z]"):
115 # olid = web.safestr(key).split("/")[-1]
116 # key = site._request("/olid_to_key", data={"olid": olid}).key
117 # obj = key and site.get(key)
118 return obj
119
120 def get_readable_path(site, path, patterns, encoding=None):
121 """Returns real_path and readable_path from the given path.
122
123 The patterns is a list of (path_regex, type, property_name, default_value)
124 tuples.
125 """
126 def match(path):
127 for pat, type, property, default_title in patterns:
128 m = web.re_compile('^' + pat).match(path)
129 if m:
130 prefix = m.group()
131 extra = web.lstrips(path, prefix)
132 tokens = extra.split("/", 2)
133
134 # `extra` starts with "/". So first token is always empty.
135 middle = web.listget(tokens, 1, "")
136 suffix = web.listget(tokens, 2, "")
137 if suffix:
138 suffix = "/" + suffix
139
140 return type, property, default_title, prefix, middle, suffix
141 return None, None, None, None, None, None
142
143 type, property, default_title, prefix, middle, suffix = match(path)
144 if type is None:
145 path = web.safeunicode(path)
146 return (path, path)
147
148 if encoding is not None \
149 or path.endswith(".json") or path.endswith(".yml") or path.endswith(".rdf"):
150 key, ext = os.path.splitext(path)
151
152 thing = _get_object(site, key)
153 if thing:
154 path = thing.key + ext
155 path = web.safeunicode(path)
156 return (path, path)
157
158 thing = _get_object(site, prefix)
159
160 # get_object may handle redirections.
161 if thing:
162 prefix = thing.key
163
164 if thing and thing.type.key == type:
165 title = thing.get(property) or default_title
166 middle = '/' + h.urlsafe(title.strip())
167 else:
168 middle = ""
169
170 if is_exclusion(thing):
171 web.ctx.exclude = True
172
173 prefix = web.safeunicode(prefix)
174 middle = web.safeunicode(middle)
175 suffix = web.safeunicode(suffix)
176
177 return (prefix + suffix, prefix + middle + suffix)
178
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/openlibrary/core/processors/readableurls.py b/openlibrary/core/processors/readableurls.py
--- a/openlibrary/core/processors/readableurls.py
+++ b/openlibrary/core/processors/readableurls.py
@@ -1,5 +1,6 @@
"""Various web.py application processors used in OL.
"""
+import logging
import os
import web
@@ -8,6 +9,7 @@
from six.moves import urllib
+logger = logging.getLogger("openlibrary.readableurls")
try:
from booklending_utils.openlibrary import is_exclusion
@@ -123,8 +125,9 @@
The patterns is a list of (path_regex, type, property_name, default_value)
tuples.
"""
+
def match(path):
- for pat, type, property, default_title in patterns:
+ for pat, _type, _property, default_title in patterns:
m = web.re_compile('^' + pat).match(path)
if m:
prefix = m.group()
@@ -137,11 +140,12 @@
if suffix:
suffix = "/" + suffix
- return type, property, default_title, prefix, middle, suffix
+ return _type, _property, default_title, prefix, middle, suffix
return None, None, None, None, None, None
- type, property, default_title, prefix, middle, suffix = match(path)
- if type is None:
+ _type, _property, default_title, prefix, middle, suffix = match(path)
+
+ if _type is None:
path = web.safeunicode(path)
return (path, path)
@@ -161,9 +165,14 @@
if thing:
prefix = thing.key
- if thing and thing.type.key == type:
- title = thing.get(property) or default_title
- middle = '/' + h.urlsafe(title.strip())
+ if thing and thing.type.key == _type:
+ title = thing.get(_property) or default_title
+ try:
+ # Explicitly only run for python3 to solve #4033
+ from urllib.parse import quote_plus
+ middle = '/' + quote_plus(h.urlsafe(title.strip()))
+ except ImportError:
+ middle = '/' + h.urlsafe(title.strip())
else:
middle = ""
|
{"golden_diff": "diff --git a/openlibrary/core/processors/readableurls.py b/openlibrary/core/processors/readableurls.py\n--- a/openlibrary/core/processors/readableurls.py\n+++ b/openlibrary/core/processors/readableurls.py\n@@ -1,5 +1,6 @@\n \"\"\"Various web.py application processors used in OL.\n \"\"\"\n+import logging\n import os\n import web\n \n@@ -8,6 +9,7 @@\n \n from six.moves import urllib\n \n+logger = logging.getLogger(\"openlibrary.readableurls\")\n \n try:\n from booklending_utils.openlibrary import is_exclusion\n@@ -123,8 +125,9 @@\n The patterns is a list of (path_regex, type, property_name, default_value)\n tuples.\n \"\"\"\n+\n def match(path):\n- for pat, type, property, default_title in patterns:\n+ for pat, _type, _property, default_title in patterns:\n m = web.re_compile('^' + pat).match(path)\n if m:\n prefix = m.group()\n@@ -137,11 +140,12 @@\n if suffix:\n suffix = \"/\" + suffix\n \n- return type, property, default_title, prefix, middle, suffix\n+ return _type, _property, default_title, prefix, middle, suffix\n return None, None, None, None, None, None\n \n- type, property, default_title, prefix, middle, suffix = match(path)\n- if type is None:\n+ _type, _property, default_title, prefix, middle, suffix = match(path)\n+\n+ if _type is None:\n path = web.safeunicode(path)\n return (path, path)\n \n@@ -161,9 +165,14 @@\n if thing:\n prefix = thing.key\n \n- if thing and thing.type.key == type:\n- title = thing.get(property) or default_title\n- middle = '/' + h.urlsafe(title.strip())\n+ if thing and thing.type.key == _type:\n+ title = thing.get(_property) or default_title\n+ try:\n+ # Explicitly only run for python3 to solve #4033\n+ from urllib.parse import quote_plus\n+ middle = '/' + quote_plus(h.urlsafe(title.strip()))\n+ except ImportError:\n+ middle = '/' + h.urlsafe(title.strip())\n else:\n middle = \"\"\n", "issue": "Books with non-ascii titles erroring when clicked from search\n### Evidence / Screenshot (if possible)\r\n\r\n### Relevant url?\r\n<!-- `https://openlibrary.org/...` -->\r\n\r\n### Steps to Reproduce\r\n<!-- What steps caused you to find the bug? -->\r\n1. Go to https://openlibrary.org/search?q=h%C3%A9las&mode=everything\r\n2. Click on any book with a non-ascii character in the title\r\n\r\n<!-- What actually happened after these steps? What did you expect to happen? -->\r\n* Actual: 500 internal error on e.g. https://openlibrary.org/works/OL11565520W?edition=\r\n* Expected: See the book\r\n\r\n### Details\r\n\r\n- **Logged in (Y/N)?** Y\r\n- **Browser type/version?** FF82\r\n- **Operating system?** Win10\r\n- **Environment (prod/dev/local)?** prod\r\n<!-- If not sure, put prod -->\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\n\r\n### Related files\r\n<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. 
-->\r\n\r\n### Stakeholders\r\n@cclauss \r\n\n", "before_files": [{"content": "\"\"\"Various web.py application processors used in OL.\n\"\"\"\nimport os\nimport web\n\nfrom infogami.utils.view import render\nfrom openlibrary.core import helpers as h\n\nfrom six.moves import urllib\n\n\ntry:\n from booklending_utils.openlibrary import is_exclusion\nexcept ImportError:\n def is_exclusion(obj):\n \"\"\"Processor for determining whether records require exclusion\"\"\"\n return False\n\nclass ReadableUrlProcessor:\n \"\"\"Open Library code works with urls like /books/OL1M and\n /books/OL1M/edit. This processor seamlessly changes the urls to\n /books/OL1M/title and /books/OL1M/title/edit.\n\n The changequery function is also customized to support this.\n \"\"\"\n patterns = [\n (r'/\\w+/OL\\d+M', '/type/edition', 'title', 'untitled'),\n (r'/\\w+/ia:[a-zA-Z0-9_\\.-]+', '/type/edition', 'title', 'untitled'),\n (r'/\\w+/OL\\d+A', '/type/author', 'name', 'noname'),\n (r'/\\w+/OL\\d+W', '/type/work', 'title', 'untitled'),\n (r'/[/\\w]+/OL\\d+L', '/type/list', 'name', 'unnamed')\n ]\n\n def __call__(self, handler):\n # temp hack to handle languages and users during upstream-to-www migration\n if web.ctx.path.startswith(\"/l/\"):\n raise web.seeother(\"/languages/\" + web.ctx.path[len(\"/l/\"):])\n\n if web.ctx.path.startswith(\"/user/\"):\n if not web.ctx.site.get(web.ctx.path):\n raise web.seeother(\"/people/\" + web.ctx.path[len(\"/user/\"):])\n\n real_path, readable_path = get_readable_path(web.ctx.site, web.ctx.path, self.patterns, encoding=web.ctx.encoding)\n\n #@@ web.ctx.path is either quoted or unquoted depends on whether the application is running\n #@@ using builtin-server or lighttpd. That is probably a bug in web.py.\n #@@ take care of that case here till that is fixed.\n # @@ Also, the redirection must be done only for GET requests.\n if readable_path != web.ctx.path and readable_path != urllib.parse.quote(web.safestr(web.ctx.path)) and web.ctx.method == \"GET\":\n raise web.redirect(web.safeunicode(readable_path) + web.safeunicode(web.ctx.query))\n\n web.ctx.readable_path = readable_path\n web.ctx.path = real_path\n web.ctx.fullpath = web.ctx.path + web.ctx.query\n out = handler()\n V2_TYPES = ['works', 'books', 'people', 'authors',\n 'publishers', 'languages', 'account']\n if out and any(web.ctx.path.startswith('/%s/' % _type) for _type in V2_TYPES):\n out.v2 = True\n\n # Exclude noindex items\n if web.ctx.get('exclude'):\n web.ctx.status = \"404 Not Found\"\n return render.notfound(web.ctx.path)\n\n return out\n\n\ndef _get_object(site, key):\n \"\"\"Returns the object with the given key.\n\n If the key has an OLID and no object is found with that key, it tries to\n find object with the same OLID. 
OL database makes sures that OLIDs are\n unique.\n \"\"\"\n obj = site.get(key)\n\n if obj is None and key.startswith(\"/a/\"):\n key = \"/authors/\" + key[len(\"/a/\"):]\n obj = key and site.get(key)\n\n if obj is None and key.startswith(\"/b/\"):\n key = \"/books/\" + key[len(\"/b/\"):]\n obj = key and site.get(key)\n\n if obj is None and key.startswith(\"/user/\"):\n key = \"/people/\" + key[len(\"/user/\"):]\n obj = key and site.get(key)\n\n basename = key.split(\"/\")[-1]\n\n # redirect all /.*/ia:foo to /books/ia:foo\n if obj is None and basename.startswith(\"ia:\"):\n key = \"/books/\" + basename\n obj = site.get(key)\n\n # redirect all /.*/OL123W to /works/OL123W\n if obj is None and basename.startswith(\"OL\") and basename.endswith(\"W\"):\n key = \"/works/\" + basename\n obj = site.get(key)\n\n # redirect all /.*/OL123M to /books/OL123M\n if obj is None and basename.startswith(\"OL\") and basename.endswith(\"M\"):\n key = \"/books/\" + basename\n obj = site.get(key)\n\n # redirect all /.*/OL123A to /authors/OL123A\n if obj is None and basename.startswith(\"OL\") and basename.endswith(\"A\"):\n key = \"/authors/\" + basename\n obj = site.get(key)\n\n # Disabled temporarily as the index is not ready the db\n\n #if obj is None and web.re_compile(r\"/.*/OL\\d+[A-Z]\"):\n # olid = web.safestr(key).split(\"/\")[-1]\n # key = site._request(\"/olid_to_key\", data={\"olid\": olid}).key\n # obj = key and site.get(key)\n return obj\n\ndef get_readable_path(site, path, patterns, encoding=None):\n \"\"\"Returns real_path and readable_path from the given path.\n\n The patterns is a list of (path_regex, type, property_name, default_value)\n tuples.\n \"\"\"\n def match(path):\n for pat, type, property, default_title in patterns:\n m = web.re_compile('^' + pat).match(path)\n if m:\n prefix = m.group()\n extra = web.lstrips(path, prefix)\n tokens = extra.split(\"/\", 2)\n\n # `extra` starts with \"/\". 
So first token is always empty.\n middle = web.listget(tokens, 1, \"\")\n suffix = web.listget(tokens, 2, \"\")\n if suffix:\n suffix = \"/\" + suffix\n\n return type, property, default_title, prefix, middle, suffix\n return None, None, None, None, None, None\n\n type, property, default_title, prefix, middle, suffix = match(path)\n if type is None:\n path = web.safeunicode(path)\n return (path, path)\n\n if encoding is not None \\\n or path.endswith(\".json\") or path.endswith(\".yml\") or path.endswith(\".rdf\"):\n key, ext = os.path.splitext(path)\n\n thing = _get_object(site, key)\n if thing:\n path = thing.key + ext\n path = web.safeunicode(path)\n return (path, path)\n\n thing = _get_object(site, prefix)\n\n # get_object may handle redirections.\n if thing:\n prefix = thing.key\n\n if thing and thing.type.key == type:\n title = thing.get(property) or default_title\n middle = '/' + h.urlsafe(title.strip())\n else:\n middle = \"\"\n\n if is_exclusion(thing):\n web.ctx.exclude = True\n\n prefix = web.safeunicode(prefix)\n middle = web.safeunicode(middle)\n suffix = web.safeunicode(suffix)\n\n return (prefix + suffix, prefix + middle + suffix)\n", "path": "openlibrary/core/processors/readableurls.py"}], "after_files": [{"content": "\"\"\"Various web.py application processors used in OL.\n\"\"\"\nimport logging\nimport os\nimport web\n\nfrom infogami.utils.view import render\nfrom openlibrary.core import helpers as h\n\nfrom six.moves import urllib\n\nlogger = logging.getLogger(\"openlibrary.readableurls\")\n\ntry:\n from booklending_utils.openlibrary import is_exclusion\nexcept ImportError:\n def is_exclusion(obj):\n \"\"\"Processor for determining whether records require exclusion\"\"\"\n return False\n\nclass ReadableUrlProcessor:\n \"\"\"Open Library code works with urls like /books/OL1M and\n /books/OL1M/edit. This processor seamlessly changes the urls to\n /books/OL1M/title and /books/OL1M/title/edit.\n\n The changequery function is also customized to support this.\n \"\"\"\n patterns = [\n (r'/\\w+/OL\\d+M', '/type/edition', 'title', 'untitled'),\n (r'/\\w+/ia:[a-zA-Z0-9_\\.-]+', '/type/edition', 'title', 'untitled'),\n (r'/\\w+/OL\\d+A', '/type/author', 'name', 'noname'),\n (r'/\\w+/OL\\d+W', '/type/work', 'title', 'untitled'),\n (r'/[/\\w]+/OL\\d+L', '/type/list', 'name', 'unnamed')\n ]\n\n def __call__(self, handler):\n # temp hack to handle languages and users during upstream-to-www migration\n if web.ctx.path.startswith(\"/l/\"):\n raise web.seeother(\"/languages/\" + web.ctx.path[len(\"/l/\"):])\n\n if web.ctx.path.startswith(\"/user/\"):\n if not web.ctx.site.get(web.ctx.path):\n raise web.seeother(\"/people/\" + web.ctx.path[len(\"/user/\"):])\n\n real_path, readable_path = get_readable_path(web.ctx.site, web.ctx.path, self.patterns, encoding=web.ctx.encoding)\n\n #@@ web.ctx.path is either quoted or unquoted depends on whether the application is running\n #@@ using builtin-server or lighttpd. 
That is probably a bug in web.py.\n #@@ take care of that case here till that is fixed.\n # @@ Also, the redirection must be done only for GET requests.\n if readable_path != web.ctx.path and readable_path != urllib.parse.quote(web.safestr(web.ctx.path)) and web.ctx.method == \"GET\":\n raise web.redirect(web.safeunicode(readable_path) + web.safeunicode(web.ctx.query))\n\n web.ctx.readable_path = readable_path\n web.ctx.path = real_path\n web.ctx.fullpath = web.ctx.path + web.ctx.query\n out = handler()\n V2_TYPES = ['works', 'books', 'people', 'authors',\n 'publishers', 'languages', 'account']\n if out and any(web.ctx.path.startswith('/%s/' % _type) for _type in V2_TYPES):\n out.v2 = True\n\n # Exclude noindex items\n if web.ctx.get('exclude'):\n web.ctx.status = \"404 Not Found\"\n return render.notfound(web.ctx.path)\n\n return out\n\n\ndef _get_object(site, key):\n \"\"\"Returns the object with the given key.\n\n If the key has an OLID and no object is found with that key, it tries to\n find object with the same OLID. OL database makes sures that OLIDs are\n unique.\n \"\"\"\n obj = site.get(key)\n\n if obj is None and key.startswith(\"/a/\"):\n key = \"/authors/\" + key[len(\"/a/\"):]\n obj = key and site.get(key)\n\n if obj is None and key.startswith(\"/b/\"):\n key = \"/books/\" + key[len(\"/b/\"):]\n obj = key and site.get(key)\n\n if obj is None and key.startswith(\"/user/\"):\n key = \"/people/\" + key[len(\"/user/\"):]\n obj = key and site.get(key)\n\n basename = key.split(\"/\")[-1]\n\n # redirect all /.*/ia:foo to /books/ia:foo\n if obj is None and basename.startswith(\"ia:\"):\n key = \"/books/\" + basename\n obj = site.get(key)\n\n # redirect all /.*/OL123W to /works/OL123W\n if obj is None and basename.startswith(\"OL\") and basename.endswith(\"W\"):\n key = \"/works/\" + basename\n obj = site.get(key)\n\n # redirect all /.*/OL123M to /books/OL123M\n if obj is None and basename.startswith(\"OL\") and basename.endswith(\"M\"):\n key = \"/books/\" + basename\n obj = site.get(key)\n\n # redirect all /.*/OL123A to /authors/OL123A\n if obj is None and basename.startswith(\"OL\") and basename.endswith(\"A\"):\n key = \"/authors/\" + basename\n obj = site.get(key)\n\n # Disabled temporarily as the index is not ready the db\n\n #if obj is None and web.re_compile(r\"/.*/OL\\d+[A-Z]\"):\n # olid = web.safestr(key).split(\"/\")[-1]\n # key = site._request(\"/olid_to_key\", data={\"olid\": olid}).key\n # obj = key and site.get(key)\n return obj\n\ndef get_readable_path(site, path, patterns, encoding=None):\n \"\"\"Returns real_path and readable_path from the given path.\n\n The patterns is a list of (path_regex, type, property_name, default_value)\n tuples.\n \"\"\"\n\n def match(path):\n for pat, _type, _property, default_title in patterns:\n m = web.re_compile('^' + pat).match(path)\n if m:\n prefix = m.group()\n extra = web.lstrips(path, prefix)\n tokens = extra.split(\"/\", 2)\n\n # `extra` starts with \"/\". 
So first token is always empty.\n middle = web.listget(tokens, 1, \"\")\n suffix = web.listget(tokens, 2, \"\")\n if suffix:\n suffix = \"/\" + suffix\n\n return _type, _property, default_title, prefix, middle, suffix\n return None, None, None, None, None, None\n\n _type, _property, default_title, prefix, middle, suffix = match(path)\n\n if _type is None:\n path = web.safeunicode(path)\n return (path, path)\n\n if encoding is not None \\\n or path.endswith(\".json\") or path.endswith(\".yml\") or path.endswith(\".rdf\"):\n key, ext = os.path.splitext(path)\n\n thing = _get_object(site, key)\n if thing:\n path = thing.key + ext\n path = web.safeunicode(path)\n return (path, path)\n\n thing = _get_object(site, prefix)\n\n # get_object may handle redirections.\n if thing:\n prefix = thing.key\n\n if thing and thing.type.key == _type:\n title = thing.get(_property) or default_title\n try:\n # Explicitly only run for python3 to solve #4033\n from urllib.parse import quote_plus\n middle = '/' + quote_plus(h.urlsafe(title.strip()))\n except ImportError:\n middle = '/' + h.urlsafe(title.strip())\n else:\n middle = \"\"\n\n if is_exclusion(thing):\n web.ctx.exclude = True\n\n prefix = web.safeunicode(prefix)\n middle = web.safeunicode(middle)\n suffix = web.safeunicode(suffix)\n\n return (prefix + suffix, prefix + middle + suffix)\n", "path": "openlibrary/core/processors/readableurls.py"}]}
| 2,569 | 521 |
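For context on the fix above: the golden diff routes the readable-URL middle segment through `urllib.parse.quote_plus`, so non-ASCII titles such as "hélas" are percent-encoded before the redirect. Below is a minimal sketch of just that encoding step, outside the Open Library codebase; the plain `strip()` stands in for the `h.urlsafe` helper and is an assumption for illustration only.

```python
from urllib.parse import quote_plus

# A work title with a non-ASCII character, as in the search query from the issue.
title = "hélas"

# Percent-encode the slug segment so the redirect URL is pure ASCII.
middle = "/" + quote_plus(title.strip())
print(middle)  # /h%C3%A9las
```

On Python 3 this import always succeeds; the `try/except ImportError` in the patch simply keeps the old behaviour on Python 2, where `urllib.parse` does not exist.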
gh_patches_debug_23348
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-8210
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tags over 100 characters
Found a bug? Please fill out the sections below. 👍
### Issue Summary
When adding a tag while using the ClusterTaggableManager class, if the tag name is longer than the character limit of the database column, no validation error is given.
### Steps to Reproduce
1. login to admin and edit a page with a tag content panel
2. create a tag with more than 100 characters
3. save, or publish the page
### Technical details
* Python version: Python 3.5.1
* Django version: 1.11.13
* Wagtail version: 1.13.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/admin/forms/tags.py`
Content:
```
1 from taggit.forms import TagField as TaggitTagField
2 from taggit.models import Tag
3
4 from wagtail.admin.widgets import AdminTagWidget
5
6
7 class TagField(TaggitTagField):
8 """
9 Extends taggit's TagField with the option to prevent creating tags that do not already exist
10 """
11
12 widget = AdminTagWidget
13
14 def __init__(self, *args, **kwargs):
15 self.tag_model = kwargs.pop("tag_model", None)
16 self.free_tagging = kwargs.pop("free_tagging", None)
17
18 super().__init__(*args, **kwargs)
19
20 # pass on tag_model and free_tagging kwargs to the widget,
21 # if (and only if) they have been passed explicitly here.
22 # Otherwise, set default values for clean() to use
23 if self.tag_model is None:
24 self.tag_model = Tag
25 else:
26 self.widget.tag_model = self.tag_model
27
28 if self.free_tagging is None:
29 self.free_tagging = getattr(self.tag_model, "free_tagging", True)
30 else:
31 self.widget.free_tagging = self.free_tagging
32
33 def clean(self, value):
34 value = super().clean(value)
35
36 if not self.free_tagging:
37 # filter value to just the tags that already exist in tag_model
38 value = list(
39 self.tag_model.objects.filter(name__in=value).values_list(
40 "name", flat=True
41 )
42 )
43
44 return value
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/admin/forms/tags.py b/wagtail/admin/forms/tags.py
--- a/wagtail/admin/forms/tags.py
+++ b/wagtail/admin/forms/tags.py
@@ -1,3 +1,5 @@
+from django.core.exceptions import ValidationError
+from django.utils.translation import gettext_lazy as _
from taggit.forms import TagField as TaggitTagField
from taggit.models import Tag
@@ -31,8 +33,27 @@
self.widget.free_tagging = self.free_tagging
def clean(self, value):
+
value = super().clean(value)
+ max_tag_length = self.tag_model.name.field.max_length
+ value_too_long = ""
+ for val in value:
+ if len(val) > max_tag_length:
+ if value_too_long:
+ value_too_long += ", "
+ value_too_long += val
+ if value_too_long:
+ raise ValidationError(
+ _(
+ "Tag(s) %(value_too_long)s are over %(max_tag_length)d characters"
+ % {
+ "value_too_long": value_too_long,
+ "max_tag_length": max_tag_length,
+ }
+ )
+ )
+
if not self.free_tagging:
# filter value to just the tags that already exist in tag_model
value = list(
|
{"golden_diff": "diff --git a/wagtail/admin/forms/tags.py b/wagtail/admin/forms/tags.py\n--- a/wagtail/admin/forms/tags.py\n+++ b/wagtail/admin/forms/tags.py\n@@ -1,3 +1,5 @@\n+from django.core.exceptions import ValidationError\n+from django.utils.translation import gettext_lazy as _\n from taggit.forms import TagField as TaggitTagField\n from taggit.models import Tag\n \n@@ -31,8 +33,27 @@\n self.widget.free_tagging = self.free_tagging\n \n def clean(self, value):\n+\n value = super().clean(value)\n \n+ max_tag_length = self.tag_model.name.field.max_length\n+ value_too_long = \"\"\n+ for val in value:\n+ if len(val) > max_tag_length:\n+ if value_too_long:\n+ value_too_long += \", \"\n+ value_too_long += val\n+ if value_too_long:\n+ raise ValidationError(\n+ _(\n+ \"Tag(s) %(value_too_long)s are over %(max_tag_length)d characters\"\n+ % {\n+ \"value_too_long\": value_too_long,\n+ \"max_tag_length\": max_tag_length,\n+ }\n+ )\n+ )\n+\n if not self.free_tagging:\n # filter value to just the tags that already exist in tag_model\n value = list(\n", "issue": "Tags over 100 characters\nFound a bug? Please fill out the sections below. \ud83d\udc4d\r\n\r\n### Issue Summary\r\n\r\nWhen adding a tag while using the ClusterTaggableManager class, if the tag name is greater than the character limit for the database column no validation error is given.\r\n\r\n### Steps to Reproduce\r\n\r\n1. login to admin and edit a page with a tag content panel\r\n2. create a tag with more than 100 characters\r\n3. save, or publish the page \r\n\r\n### Technical details\r\n\r\n* Python version: Python 3.5.1\r\n* Django version: 1.11.13\r\n* Wagtail version: 1.13.1\nTags over 100 characters\nFound a bug? Please fill out the sections below. \ud83d\udc4d\r\n\r\n### Issue Summary\r\n\r\nWhen adding a tag while using the ClusterTaggableManager class, if the tag name is greater than the character limit for the database column no validation error is given.\r\n\r\n### Steps to Reproduce\r\n\r\n1. login to admin and edit a page with a tag content panel\r\n2. create a tag with more than 100 characters\r\n3. 
save, or publish the page \r\n\r\n### Technical details\r\n\r\n* Python version: Python 3.5.1\r\n* Django version: 1.11.13\r\n* Wagtail version: 1.13.1\n", "before_files": [{"content": "from taggit.forms import TagField as TaggitTagField\nfrom taggit.models import Tag\n\nfrom wagtail.admin.widgets import AdminTagWidget\n\n\nclass TagField(TaggitTagField):\n \"\"\"\n Extends taggit's TagField with the option to prevent creating tags that do not already exist\n \"\"\"\n\n widget = AdminTagWidget\n\n def __init__(self, *args, **kwargs):\n self.tag_model = kwargs.pop(\"tag_model\", None)\n self.free_tagging = kwargs.pop(\"free_tagging\", None)\n\n super().__init__(*args, **kwargs)\n\n # pass on tag_model and free_tagging kwargs to the widget,\n # if (and only if) they have been passed explicitly here.\n # Otherwise, set default values for clean() to use\n if self.tag_model is None:\n self.tag_model = Tag\n else:\n self.widget.tag_model = self.tag_model\n\n if self.free_tagging is None:\n self.free_tagging = getattr(self.tag_model, \"free_tagging\", True)\n else:\n self.widget.free_tagging = self.free_tagging\n\n def clean(self, value):\n value = super().clean(value)\n\n if not self.free_tagging:\n # filter value to just the tags that already exist in tag_model\n value = list(\n self.tag_model.objects.filter(name__in=value).values_list(\n \"name\", flat=True\n )\n )\n\n return value\n", "path": "wagtail/admin/forms/tags.py"}], "after_files": [{"content": "from django.core.exceptions import ValidationError\nfrom django.utils.translation import gettext_lazy as _\nfrom taggit.forms import TagField as TaggitTagField\nfrom taggit.models import Tag\n\nfrom wagtail.admin.widgets import AdminTagWidget\n\n\nclass TagField(TaggitTagField):\n \"\"\"\n Extends taggit's TagField with the option to prevent creating tags that do not already exist\n \"\"\"\n\n widget = AdminTagWidget\n\n def __init__(self, *args, **kwargs):\n self.tag_model = kwargs.pop(\"tag_model\", None)\n self.free_tagging = kwargs.pop(\"free_tagging\", None)\n\n super().__init__(*args, **kwargs)\n\n # pass on tag_model and free_tagging kwargs to the widget,\n # if (and only if) they have been passed explicitly here.\n # Otherwise, set default values for clean() to use\n if self.tag_model is None:\n self.tag_model = Tag\n else:\n self.widget.tag_model = self.tag_model\n\n if self.free_tagging is None:\n self.free_tagging = getattr(self.tag_model, \"free_tagging\", True)\n else:\n self.widget.free_tagging = self.free_tagging\n\n def clean(self, value):\n\n value = super().clean(value)\n\n max_tag_length = self.tag_model.name.field.max_length\n value_too_long = \"\"\n for val in value:\n if len(val) > max_tag_length:\n if value_too_long:\n value_too_long += \", \"\n value_too_long += val\n if value_too_long:\n raise ValidationError(\n _(\n \"Tag(s) %(value_too_long)s are over %(max_tag_length)d characters\"\n % {\n \"value_too_long\": value_too_long,\n \"max_tag_length\": max_tag_length,\n }\n )\n )\n\n if not self.free_tagging:\n # filter value to just the tags that already exist in tag_model\n value = list(\n self.tag_model.objects.filter(name__in=value).values_list(\n \"name\", flat=True\n )\n )\n\n return value\n", "path": "wagtail/admin/forms/tags.py"}]}
| 949 | 300 |
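To make the behaviour of the patch above concrete: the added code measures each submitted tag name against the tag model's `name` column length and raises a `ValidationError` listing the offenders. Here is a reduced, self-contained sketch of that check; the helper name and the hard-coded limit of 100 characters are illustrative assumptions, while the real field reads the limit from `self.tag_model.name.field.max_length`.

```python
from django.core.exceptions import ValidationError


def check_tag_lengths(tags, max_tag_length=100):
    # Collect every tag name that would overflow the database column.
    too_long = [tag for tag in tags if len(tag) > max_tag_length]
    if too_long:
        raise ValidationError(
            "Tag(s) %s are over %d characters" % (", ".join(too_long), max_tag_length)
        )
    return tags


# The second tag is 150 characters, so the check reports it instead of
# letting it fail silently or at the database layer.
try:
    check_tag_lengths(["short-tag", "x" * 150])
except ValidationError as exc:
    print(exc.messages)  # the oversized tag is reported as over 100 characters
```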
gh_patches_debug_24605
|
rasdani/github-patches
|
git_diff
|
lutris__lutris-1180
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Game Sync Bug (Proton, Steamworks Banner, etc.)
If you ever install a Wine Steam game or use Steam Play with Proton from the beta branch, Lutris scans some necessary Steam applications and shows them as game banners:

This happens because the application has a SteamID:
in this case:
Proton 3.7 Beta: 930400
Proton 3.7: 858280
(the Steamworks SDK from Wine Steam also has an ID, but I don't have it installed at the moment)
As a user, we can "delete" it via the context menu in the Lutris main window, but that does not work permanently, because after a restart it gets synced and shown again.
Please fix :+1:
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/services/steam.py`
Content:
```
1 import os
2 import re
3 from collections import defaultdict
4
5 from lutris import pga
6 from lutris.util.log import logger
7 from lutris.util.steam import vdf_parse
8 from lutris.util.system import fix_path_case
9 from lutris.util.strings import slugify
10 from lutris.config import make_game_config_id, LutrisConfig
11
12 NAME = 'Steam'
13
14 APP_STATE_FLAGS = [
15 "Invalid",
16 "Uninstalled",
17 "Update Required",
18 "Fully Installed",
19 "Encrypted",
20 "Locked",
21 "Files Missing",
22 "AppRunning",
23 "Files Corrupt",
24 "Update Running",
25 "Update Paused",
26 "Update Started",
27 "Uninstalling",
28 "Backup Running",
29 "Reconfiguring",
30 "Validating",
31 "Adding Files",
32 "Preallocating",
33 "Downloading",
34 "Staging",
35 "Committing",
36 "Update Stopping"
37 ]
38
39
40 class AppManifest:
41 def __init__(self, appmanifest_path):
42 self.appmanifest_path = appmanifest_path
43 self.steamapps_path, filename = os.path.split(appmanifest_path)
44 self.steamid = re.findall(r'(\d+)', filename)[-1]
45 if os.path.exists(appmanifest_path):
46 with open(appmanifest_path, "r") as appmanifest_file:
47 self.appmanifest_data = vdf_parse(appmanifest_file, {})
48
49 def __repr__(self):
50 return "<AppManifest: %s>" % self.appmanifest_path
51
52 @property
53 def app_state(self):
54 return self.appmanifest_data.get('AppState') or {}
55
56 @property
57 def user_config(self):
58 return self.app_state.get('UserConfig') or {}
59
60 @property
61 def name(self):
62 _name = self.app_state.get('name')
63 if not _name:
64 _name = self.user_config.get('name')
65 return _name
66
67 @property
68 def slug(self):
69 return slugify(self.name)
70
71 @property
72 def installdir(self):
73 return self.app_state.get('installdir')
74
75 @property
76 def states(self):
77 """Return the states of a Steam game."""
78 states = []
79 state_flags = self.app_state.get('StateFlags', 0)
80 state_flags = bin(int(state_flags))[:1:-1]
81 for index, flag in enumerate(state_flags):
82 if flag == '1':
83 states.append(APP_STATE_FLAGS[index + 1])
84 return states
85
86 def is_installed(self):
87 return 'Fully Installed' in self.states
88
89 def get_install_path(self):
90 if not self.installdir:
91 return
92 install_path = fix_path_case(os.path.join(self.steamapps_path, "common",
93 self.installdir))
94 if install_path:
95 return install_path
96
97 def get_platform(self):
98 steamapps_paths = get_steamapps_paths()
99 if self.steamapps_path in steamapps_paths['linux']:
100 return 'linux'
101 elif self.steamapps_path in steamapps_paths['windows']:
102 return 'windows'
103 else:
104 raise ValueError("Can't find %s in %s"
105 % (self.steamapps_path, steamapps_paths))
106
107 def get_runner_name(self):
108 platform = self.get_platform()
109 if platform == 'linux':
110 return 'steam'
111 else:
112 return 'winesteam'
113
114
115 def get_appmanifests(steamapps_path):
116 """Return the list for all appmanifest files in a Steam library folder"""
117 return [f for f in os.listdir(steamapps_path)
118 if re.match(r'^appmanifest_\d+.acf$', f)]
119
120
121 def get_steamapps_paths_for_platform(platform_name):
122 from lutris.runners import winesteam, steam
123 runners = {
124 'linux': steam.steam,
125 'windows': winesteam.winesteam
126 }
127 runner = runners[platform_name]()
128 return runner.get_steamapps_dirs()
129
130
131 def get_steamapps_paths(flat=False, platform=None):
132 base_platforms = ['linux', 'windows']
133 if flat:
134 steamapps_paths = []
135 else:
136 steamapps_paths = defaultdict(list)
137
138 if platform:
139 if platform not in base_platforms:
140 raise ValueError("Illegal value for Steam platform: %s" % platform)
141 platforms = [platform]
142 else:
143 platforms = base_platforms
144
145 for platform in platforms:
146 folders = get_steamapps_paths_for_platform(platform)
147 if flat:
148 steamapps_paths += folders
149 else:
150 steamapps_paths[platform] = folders
151
152 return steamapps_paths
153
154
155 def get_appmanifest_from_appid(steamapps_path, appid):
156 """Given the steam apps path and appid, return the corresponding appmanifest"""
157 if not steamapps_path:
158 raise ValueError("steamapps_path is mandatory")
159 if not os.path.exists(steamapps_path):
160 raise IOError("steamapps_path must be a valid directory")
161 if not appid:
162 raise ValueError("Missing mandatory appid")
163 appmanifest_path = os.path.join(steamapps_path, "appmanifest_%s.acf" % appid)
164 if not os.path.exists(appmanifest_path):
165 return
166 return AppManifest(appmanifest_path)
167
168
169 def get_path_from_appmanifest(steamapps_path, appid):
170 """Return the path where a Steam game is installed."""
171 appmanifest = get_appmanifest_from_appid(steamapps_path, appid)
172 if not appmanifest:
173 return
174 return appmanifest.get_install_path()
175
176
177 def mark_as_installed(steamid, runner_name, game_info):
178 for key in ['name', 'slug']:
179 assert game_info[key]
180 logger.info("Setting %s as installed" % game_info['name'])
181 config_id = (game_info.get('config_path') or make_game_config_id(game_info['slug']))
182 game_id = pga.add_or_update(
183 steamid=int(steamid),
184 name=game_info['name'],
185 runner=runner_name,
186 slug=game_info['slug'],
187 installed=1,
188 configpath=config_id,
189 )
190
191 game_config = LutrisConfig(
192 runner_slug=runner_name,
193 game_config_id=config_id,
194 )
195 game_config.raw_game_config.update({'appid': steamid})
196 game_config.save()
197 return game_id
198
199
200 def mark_as_uninstalled(game_info):
201 for key in ('id', 'name'):
202 if key not in game_info:
203 raise ValueError("Missing %s field in %s" % (key, game_info))
204 logger.info('Setting %s as uninstalled' % game_info['name'])
205 game_id = pga.add_or_update(
206 id=game_info['id'],
207 runner='',
208 installed=0
209 )
210 return game_id
211
212
213 def sync_appmanifest_state(appmanifest_path, name=None, slug=None):
214 try:
215 appmanifest = AppManifest(appmanifest_path)
216 except Exception:
217 logger.error("Unable to parse file %s", appmanifest_path)
218 return
219 if appmanifest.is_installed():
220 game_info = {
221 'name': name or appmanifest.name,
222 'slug': slug or appmanifest.slug,
223 }
224 runner_name = appmanifest.get_runner_name()
225 mark_as_installed(appmanifest.steamid, runner_name, game_info)
226
227
228 def sync_with_lutris(platform='linux'):
229 steamapps_paths = get_steamapps_paths()
230 steam_games_in_lutris = pga.get_games_where(steamid__isnull=False, steamid__not='')
231 steamids_in_lutris = set([str(game['steamid']) for game in steam_games_in_lutris])
232 seen_ids = set() # Set of Steam appids seen while browsing AppManifests
233
234 for steamapps_path in steamapps_paths[platform]:
235 appmanifests = get_appmanifests(steamapps_path)
236 for appmanifest_file in appmanifests:
237 steamid = re.findall(r'(\d+)', appmanifest_file)[0]
238 seen_ids.add(steamid)
239 appmanifest_path = os.path.join(steamapps_path, appmanifest_file)
240 if steamid not in steamids_in_lutris:
241 # New Steam game, not seen before in Lutris,
242 if platform != 'linux':
243 # Windows games might require additional steps.
244 # TODO: Find a way to mark games as "Not fully configured"
245 # as the status.
246 logger.warning("Importing Steam game %s but game might require additional configuration")
247 sync_appmanifest_state(appmanifest_path)
248 else:
249 # Lookup previously installed Steam games
250 pga_entry = None
251 for game in steam_games_in_lutris:
252 if str(game['steamid']) == steamid and not game['installed']:
253 pga_entry = game
254 break
255 if pga_entry:
256 sync_appmanifest_state(appmanifest_path, name=pga_entry['name'], slug=pga_entry['slug'])
257 unavailable_ids = steamids_in_lutris.difference(seen_ids)
258 for steamid in unavailable_ids:
259 for game in steam_games_in_lutris:
260 runner = 'steam' if platform == 'linux' else 'winesteam'
261 if str(game['steamid']) == steamid \
262 and game['installed'] \
263 and game['runner'] == runner:
264 mark_as_uninstalled(game)
265
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lutris/services/steam.py b/lutris/services/steam.py
--- a/lutris/services/steam.py
+++ b/lutris/services/steam.py
@@ -228,6 +228,7 @@
def sync_with_lutris(platform='linux'):
steamapps_paths = get_steamapps_paths()
steam_games_in_lutris = pga.get_games_where(steamid__isnull=False, steamid__not='')
+ proton_ids = ["858280", "930400"]
steamids_in_lutris = set([str(game['steamid']) for game in steam_games_in_lutris])
seen_ids = set() # Set of Steam appids seen while browsing AppManifests
@@ -237,7 +238,7 @@
steamid = re.findall(r'(\d+)', appmanifest_file)[0]
seen_ids.add(steamid)
appmanifest_path = os.path.join(steamapps_path, appmanifest_file)
- if steamid not in steamids_in_lutris:
+ if steamid not in steamids_in_lutris and steamid not in proton_ids:
# New Steam game, not seen before in Lutris,
if platform != 'linux':
# Windows games might require additional steps.
|
{"golden_diff": "diff --git a/lutris/services/steam.py b/lutris/services/steam.py\n--- a/lutris/services/steam.py\n+++ b/lutris/services/steam.py\n@@ -228,6 +228,7 @@\n def sync_with_lutris(platform='linux'):\n steamapps_paths = get_steamapps_paths()\n steam_games_in_lutris = pga.get_games_where(steamid__isnull=False, steamid__not='')\n+ proton_ids = [\"858280\", \"930400\"]\n steamids_in_lutris = set([str(game['steamid']) for game in steam_games_in_lutris])\n seen_ids = set() # Set of Steam appids seen while browsing AppManifests\n \n@@ -237,7 +238,7 @@\n steamid = re.findall(r'(\\d+)', appmanifest_file)[0]\n seen_ids.add(steamid)\n appmanifest_path = os.path.join(steamapps_path, appmanifest_file)\n- if steamid not in steamids_in_lutris:\n+ if steamid not in steamids_in_lutris and steamid not in proton_ids:\n # New Steam game, not seen before in Lutris,\n if platform != 'linux':\n # Windows games might require additional steps.\n", "issue": "Game Sync Bug (Proton,Steamworks Banner etc.)\nIf you ever install a Winesteam Game or use Steam Play with Proton from the beta branch, Lutris scan some necessary Steam Applications and show it as Gamebanner:\r\n\r\n\r\n\r\nthis happen cause the Application have a SteamID:\r\n\r\nin this case:\r\nProton 3.7 Beta: 930400\r\nProton 3.7: 858280\r\n\r\n(also Steamworks SDK from Wine-Steam have a ID, but i dont have it installed atm)\r\n\r\nAs a User, we can \"delete\" it via the Context Menu from the Lutris Main Window but it dont work permanent, cause after a restart it get again synced and shown.\r\n\r\nPls to fix :+1: \r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import os\nimport re\nfrom collections import defaultdict\n\nfrom lutris import pga\nfrom lutris.util.log import logger\nfrom lutris.util.steam import vdf_parse\nfrom lutris.util.system import fix_path_case\nfrom lutris.util.strings import slugify\nfrom lutris.config import make_game_config_id, LutrisConfig\n\nNAME = 'Steam'\n\nAPP_STATE_FLAGS = [\n \"Invalid\",\n \"Uninstalled\",\n \"Update Required\",\n \"Fully Installed\",\n \"Encrypted\",\n \"Locked\",\n \"Files Missing\",\n \"AppRunning\",\n \"Files Corrupt\",\n \"Update Running\",\n \"Update Paused\",\n \"Update Started\",\n \"Uninstalling\",\n \"Backup Running\",\n \"Reconfiguring\",\n \"Validating\",\n \"Adding Files\",\n \"Preallocating\",\n \"Downloading\",\n \"Staging\",\n \"Committing\",\n \"Update Stopping\"\n]\n\n\nclass AppManifest:\n def __init__(self, appmanifest_path):\n self.appmanifest_path = appmanifest_path\n self.steamapps_path, filename = os.path.split(appmanifest_path)\n self.steamid = re.findall(r'(\\d+)', filename)[-1]\n if os.path.exists(appmanifest_path):\n with open(appmanifest_path, \"r\") as appmanifest_file:\n self.appmanifest_data = vdf_parse(appmanifest_file, {})\n\n def __repr__(self):\n return \"<AppManifest: %s>\" % self.appmanifest_path\n\n @property\n def app_state(self):\n return self.appmanifest_data.get('AppState') or {}\n\n @property\n def user_config(self):\n return self.app_state.get('UserConfig') or {}\n\n @property\n def name(self):\n _name = self.app_state.get('name')\n if not _name:\n _name = self.user_config.get('name')\n return _name\n\n @property\n def slug(self):\n return slugify(self.name)\n\n @property\n def installdir(self):\n return self.app_state.get('installdir')\n\n @property\n def states(self):\n \"\"\"Return the states of a Steam game.\"\"\"\n states = []\n state_flags = self.app_state.get('StateFlags', 0)\n state_flags = bin(int(state_flags))[:1:-1]\n for index, flag in 
enumerate(state_flags):\n if flag == '1':\n states.append(APP_STATE_FLAGS[index + 1])\n return states\n\n def is_installed(self):\n return 'Fully Installed' in self.states\n\n def get_install_path(self):\n if not self.installdir:\n return\n install_path = fix_path_case(os.path.join(self.steamapps_path, \"common\",\n self.installdir))\n if install_path:\n return install_path\n\n def get_platform(self):\n steamapps_paths = get_steamapps_paths()\n if self.steamapps_path in steamapps_paths['linux']:\n return 'linux'\n elif self.steamapps_path in steamapps_paths['windows']:\n return 'windows'\n else:\n raise ValueError(\"Can't find %s in %s\"\n % (self.steamapps_path, steamapps_paths))\n\n def get_runner_name(self):\n platform = self.get_platform()\n if platform == 'linux':\n return 'steam'\n else:\n return 'winesteam'\n\n\ndef get_appmanifests(steamapps_path):\n \"\"\"Return the list for all appmanifest files in a Steam library folder\"\"\"\n return [f for f in os.listdir(steamapps_path)\n if re.match(r'^appmanifest_\\d+.acf$', f)]\n\n\ndef get_steamapps_paths_for_platform(platform_name):\n from lutris.runners import winesteam, steam\n runners = {\n 'linux': steam.steam,\n 'windows': winesteam.winesteam\n }\n runner = runners[platform_name]()\n return runner.get_steamapps_dirs()\n\n\ndef get_steamapps_paths(flat=False, platform=None):\n base_platforms = ['linux', 'windows']\n if flat:\n steamapps_paths = []\n else:\n steamapps_paths = defaultdict(list)\n\n if platform:\n if platform not in base_platforms:\n raise ValueError(\"Illegal value for Steam platform: %s\" % platform)\n platforms = [platform]\n else:\n platforms = base_platforms\n\n for platform in platforms:\n folders = get_steamapps_paths_for_platform(platform)\n if flat:\n steamapps_paths += folders\n else:\n steamapps_paths[platform] = folders\n\n return steamapps_paths\n\n\ndef get_appmanifest_from_appid(steamapps_path, appid):\n \"\"\"Given the steam apps path and appid, return the corresponding appmanifest\"\"\"\n if not steamapps_path:\n raise ValueError(\"steamapps_path is mandatory\")\n if not os.path.exists(steamapps_path):\n raise IOError(\"steamapps_path must be a valid directory\")\n if not appid:\n raise ValueError(\"Missing mandatory appid\")\n appmanifest_path = os.path.join(steamapps_path, \"appmanifest_%s.acf\" % appid)\n if not os.path.exists(appmanifest_path):\n return\n return AppManifest(appmanifest_path)\n\n\ndef get_path_from_appmanifest(steamapps_path, appid):\n \"\"\"Return the path where a Steam game is installed.\"\"\"\n appmanifest = get_appmanifest_from_appid(steamapps_path, appid)\n if not appmanifest:\n return\n return appmanifest.get_install_path()\n\n\ndef mark_as_installed(steamid, runner_name, game_info):\n for key in ['name', 'slug']:\n assert game_info[key]\n logger.info(\"Setting %s as installed\" % game_info['name'])\n config_id = (game_info.get('config_path') or make_game_config_id(game_info['slug']))\n game_id = pga.add_or_update(\n steamid=int(steamid),\n name=game_info['name'],\n runner=runner_name,\n slug=game_info['slug'],\n installed=1,\n configpath=config_id,\n )\n\n game_config = LutrisConfig(\n runner_slug=runner_name,\n game_config_id=config_id,\n )\n game_config.raw_game_config.update({'appid': steamid})\n game_config.save()\n return game_id\n\n\ndef mark_as_uninstalled(game_info):\n for key in ('id', 'name'):\n if key not in game_info:\n raise ValueError(\"Missing %s field in %s\" % (key, game_info))\n logger.info('Setting %s as uninstalled' % game_info['name'])\n game_id = 
pga.add_or_update(\n id=game_info['id'],\n runner='',\n installed=0\n )\n return game_id\n\n\ndef sync_appmanifest_state(appmanifest_path, name=None, slug=None):\n try:\n appmanifest = AppManifest(appmanifest_path)\n except Exception:\n logger.error(\"Unable to parse file %s\", appmanifest_path)\n return\n if appmanifest.is_installed():\n game_info = {\n 'name': name or appmanifest.name,\n 'slug': slug or appmanifest.slug,\n }\n runner_name = appmanifest.get_runner_name()\n mark_as_installed(appmanifest.steamid, runner_name, game_info)\n\n\ndef sync_with_lutris(platform='linux'):\n steamapps_paths = get_steamapps_paths()\n steam_games_in_lutris = pga.get_games_where(steamid__isnull=False, steamid__not='')\n steamids_in_lutris = set([str(game['steamid']) for game in steam_games_in_lutris])\n seen_ids = set() # Set of Steam appids seen while browsing AppManifests\n\n for steamapps_path in steamapps_paths[platform]:\n appmanifests = get_appmanifests(steamapps_path)\n for appmanifest_file in appmanifests:\n steamid = re.findall(r'(\\d+)', appmanifest_file)[0]\n seen_ids.add(steamid)\n appmanifest_path = os.path.join(steamapps_path, appmanifest_file)\n if steamid not in steamids_in_lutris:\n # New Steam game, not seen before in Lutris,\n if platform != 'linux':\n # Windows games might require additional steps.\n # TODO: Find a way to mark games as \"Not fully configured\"\n # as the status.\n logger.warning(\"Importing Steam game %s but game might require additional configuration\")\n sync_appmanifest_state(appmanifest_path)\n else:\n # Lookup previously installed Steam games\n pga_entry = None\n for game in steam_games_in_lutris:\n if str(game['steamid']) == steamid and not game['installed']:\n pga_entry = game\n break\n if pga_entry:\n sync_appmanifest_state(appmanifest_path, name=pga_entry['name'], slug=pga_entry['slug'])\n unavailable_ids = steamids_in_lutris.difference(seen_ids)\n for steamid in unavailable_ids:\n for game in steam_games_in_lutris:\n runner = 'steam' if platform == 'linux' else 'winesteam'\n if str(game['steamid']) == steamid \\\n and game['installed'] \\\n and game['runner'] == runner:\n mark_as_uninstalled(game)\n", "path": "lutris/services/steam.py"}], "after_files": [{"content": "import os\nimport re\nfrom collections import defaultdict\n\nfrom lutris import pga\nfrom lutris.util.log import logger\nfrom lutris.util.steam import vdf_parse\nfrom lutris.util.system import fix_path_case\nfrom lutris.util.strings import slugify\nfrom lutris.config import make_game_config_id, LutrisConfig\n\nNAME = 'Steam'\n\nAPP_STATE_FLAGS = [\n \"Invalid\",\n \"Uninstalled\",\n \"Update Required\",\n \"Fully Installed\",\n \"Encrypted\",\n \"Locked\",\n \"Files Missing\",\n \"AppRunning\",\n \"Files Corrupt\",\n \"Update Running\",\n \"Update Paused\",\n \"Update Started\",\n \"Uninstalling\",\n \"Backup Running\",\n \"Reconfiguring\",\n \"Validating\",\n \"Adding Files\",\n \"Preallocating\",\n \"Downloading\",\n \"Staging\",\n \"Committing\",\n \"Update Stopping\"\n]\n\n\nclass AppManifest:\n def __init__(self, appmanifest_path):\n self.appmanifest_path = appmanifest_path\n self.steamapps_path, filename = os.path.split(appmanifest_path)\n self.steamid = re.findall(r'(\\d+)', filename)[-1]\n if os.path.exists(appmanifest_path):\n with open(appmanifest_path, \"r\") as appmanifest_file:\n self.appmanifest_data = vdf_parse(appmanifest_file, {})\n\n def __repr__(self):\n return \"<AppManifest: %s>\" % self.appmanifest_path\n\n @property\n def app_state(self):\n return 
self.appmanifest_data.get('AppState') or {}\n\n @property\n def user_config(self):\n return self.app_state.get('UserConfig') or {}\n\n @property\n def name(self):\n _name = self.app_state.get('name')\n if not _name:\n _name = self.user_config.get('name')\n return _name\n\n @property\n def slug(self):\n return slugify(self.name)\n\n @property\n def installdir(self):\n return self.app_state.get('installdir')\n\n @property\n def states(self):\n \"\"\"Return the states of a Steam game.\"\"\"\n states = []\n state_flags = self.app_state.get('StateFlags', 0)\n state_flags = bin(int(state_flags))[:1:-1]\n for index, flag in enumerate(state_flags):\n if flag == '1':\n states.append(APP_STATE_FLAGS[index + 1])\n return states\n\n def is_installed(self):\n return 'Fully Installed' in self.states\n\n def get_install_path(self):\n if not self.installdir:\n return\n install_path = fix_path_case(os.path.join(self.steamapps_path, \"common\",\n self.installdir))\n if install_path:\n return install_path\n\n def get_platform(self):\n steamapps_paths = get_steamapps_paths()\n if self.steamapps_path in steamapps_paths['linux']:\n return 'linux'\n elif self.steamapps_path in steamapps_paths['windows']:\n return 'windows'\n else:\n raise ValueError(\"Can't find %s in %s\"\n % (self.steamapps_path, steamapps_paths))\n\n def get_runner_name(self):\n platform = self.get_platform()\n if platform == 'linux':\n return 'steam'\n else:\n return 'winesteam'\n\n\ndef get_appmanifests(steamapps_path):\n \"\"\"Return the list for all appmanifest files in a Steam library folder\"\"\"\n return [f for f in os.listdir(steamapps_path)\n if re.match(r'^appmanifest_\\d+.acf$', f)]\n\n\ndef get_steamapps_paths_for_platform(platform_name):\n from lutris.runners import winesteam, steam\n runners = {\n 'linux': steam.steam,\n 'windows': winesteam.winesteam\n }\n runner = runners[platform_name]()\n return runner.get_steamapps_dirs()\n\n\ndef get_steamapps_paths(flat=False, platform=None):\n base_platforms = ['linux', 'windows']\n if flat:\n steamapps_paths = []\n else:\n steamapps_paths = defaultdict(list)\n\n if platform:\n if platform not in base_platforms:\n raise ValueError(\"Illegal value for Steam platform: %s\" % platform)\n platforms = [platform]\n else:\n platforms = base_platforms\n\n for platform in platforms:\n folders = get_steamapps_paths_for_platform(platform)\n if flat:\n steamapps_paths += folders\n else:\n steamapps_paths[platform] = folders\n\n return steamapps_paths\n\n\ndef get_appmanifest_from_appid(steamapps_path, appid):\n \"\"\"Given the steam apps path and appid, return the corresponding appmanifest\"\"\"\n if not steamapps_path:\n raise ValueError(\"steamapps_path is mandatory\")\n if not os.path.exists(steamapps_path):\n raise IOError(\"steamapps_path must be a valid directory\")\n if not appid:\n raise ValueError(\"Missing mandatory appid\")\n appmanifest_path = os.path.join(steamapps_path, \"appmanifest_%s.acf\" % appid)\n if not os.path.exists(appmanifest_path):\n return\n return AppManifest(appmanifest_path)\n\n\ndef get_path_from_appmanifest(steamapps_path, appid):\n \"\"\"Return the path where a Steam game is installed.\"\"\"\n appmanifest = get_appmanifest_from_appid(steamapps_path, appid)\n if not appmanifest:\n return\n return appmanifest.get_install_path()\n\n\ndef mark_as_installed(steamid, runner_name, game_info):\n for key in ['name', 'slug']:\n assert game_info[key]\n logger.info(\"Setting %s as installed\" % game_info['name'])\n config_id = (game_info.get('config_path') or 
make_game_config_id(game_info['slug']))\n game_id = pga.add_or_update(\n steamid=int(steamid),\n name=game_info['name'],\n runner=runner_name,\n slug=game_info['slug'],\n installed=1,\n configpath=config_id,\n )\n\n game_config = LutrisConfig(\n runner_slug=runner_name,\n game_config_id=config_id,\n )\n game_config.raw_game_config.update({'appid': steamid})\n game_config.save()\n return game_id\n\n\ndef mark_as_uninstalled(game_info):\n for key in ('id', 'name'):\n if key not in game_info:\n raise ValueError(\"Missing %s field in %s\" % (key, game_info))\n logger.info('Setting %s as uninstalled' % game_info['name'])\n game_id = pga.add_or_update(\n id=game_info['id'],\n runner='',\n installed=0\n )\n return game_id\n\n\ndef sync_appmanifest_state(appmanifest_path, name=None, slug=None):\n try:\n appmanifest = AppManifest(appmanifest_path)\n except Exception:\n logger.error(\"Unable to parse file %s\", appmanifest_path)\n return\n if appmanifest.is_installed():\n game_info = {\n 'name': name or appmanifest.name,\n 'slug': slug or appmanifest.slug,\n }\n runner_name = appmanifest.get_runner_name()\n mark_as_installed(appmanifest.steamid, runner_name, game_info)\n\n\ndef sync_with_lutris(platform='linux'):\n steamapps_paths = get_steamapps_paths()\n steam_games_in_lutris = pga.get_games_where(steamid__isnull=False, steamid__not='')\n proton_ids = [\"858280\", \"930400\"]\n steamids_in_lutris = set([str(game['steamid']) for game in steam_games_in_lutris])\n seen_ids = set() # Set of Steam appids seen while browsing AppManifests\n\n for steamapps_path in steamapps_paths[platform]:\n appmanifests = get_appmanifests(steamapps_path)\n for appmanifest_file in appmanifests:\n steamid = re.findall(r'(\\d+)', appmanifest_file)[0]\n seen_ids.add(steamid)\n appmanifest_path = os.path.join(steamapps_path, appmanifest_file)\n if steamid not in steamids_in_lutris and steamid not in proton_ids:\n # New Steam game, not seen before in Lutris,\n if platform != 'linux':\n # Windows games might require additional steps.\n # TODO: Find a way to mark games as \"Not fully configured\"\n # as the status.\n logger.warning(\"Importing Steam game %s but game might require additional configuration\")\n sync_appmanifest_state(appmanifest_path)\n else:\n # Lookup previously installed Steam games\n pga_entry = None\n for game in steam_games_in_lutris:\n if str(game['steamid']) == steamid and not game['installed']:\n pga_entry = game\n break\n if pga_entry:\n sync_appmanifest_state(appmanifest_path, name=pga_entry['name'], slug=pga_entry['slug'])\n unavailable_ids = steamids_in_lutris.difference(seen_ids)\n for steamid in unavailable_ids:\n for game in steam_games_in_lutris:\n runner = 'steam' if platform == 'linux' else 'winesteam'\n if str(game['steamid']) == steamid \\\n and game['installed'] \\\n and game['runner'] == runner:\n mark_as_uninstalled(game)\n", "path": "lutris/services/steam.py"}]}
| 3,192 | 290 |
gh_patches_debug_66972 | rasdani/github-patches | git_diff | pandas-dev__pandas-19628 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DateTimeIndex.__iter__().next() rounds time to microseconds, when timezone aware
#### Code Sample
```python
>> import pandas as pd
>> datetimeindex = pd.DatetimeIndex(["2018-02-08 15:00:00.168456358"])
>> datetimeindex
DatetimeIndex(['2018-02-08 15:00:00.168456358'], dtype='datetime64[ns]', freq=None)
>> datetimeindex = datetimeindex.tz_localize(datetime.timezone.utc)
>> datetimeindex
DatetimeIndex(['2018-02-08 15:00:00.168456358+00:00'], dtype='datetime64[ns, UTC+00:00]', freq=None)
>> datetimeindex.__getitem__(0)
Timestamp('2018-02-08 15:00:00.168456358+0000', tz='UTC+00:00')
>> datetimeindex.__iter__().__next__()
Timestamp('2018-02-08 15:00:00.168456+0000', tz='UTC+00:00')
```
#### Problem description
When using a timezone-localized DatetimeIndex with nanosecond precision, __getitem__ behaviour differs from __iter__().__next__() behaviour: when iterating through the DatetimeIndex the timestamps are rounded to microseconds. This does not happen if the DatetimeIndex has no timezone.
#### Output of ``pd.show_versions()``
<details>
INSTALLED VERSIONS
------------------
commit: None
python: 3.4.2.final.0
python-bits: 64
OS: Linux
OS-release: 4.9.0-0.bpo.2-amd64
machine: x86_64
processor:
byteorder: little
LC_ALL: None
LANG: en_US.UTF-8
LOCALE: en_US.UTF-8
pandas: 0.22.0
pytest: None
pip: 9.0.1
setuptools: 36.5.0
Cython: None
numpy: 1.14.0
scipy: 1.0.0
pyarrow: None
xarray: None
IPython: 6.2.1
sphinx: None
patsy: None
dateutil: 2.6.1
pytz: 2017.3
blosc: None
bottleneck: None
tables: None
numexpr: None
feather: None
matplotlib: 2.0.2
openpyxl: None
xlrd: None
xlwt: None
xlsxwriter: None
lxml: None
bs4: None
html5lib: None
sqlalchemy: None
pymysql: None
psycopg2: None
jinja2: None
s3fs: None
fastparquet: None
pandas_gbq: None
pandas_datareader: None
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandas/conftest.py`
Content:
```
1 import pytest
2
3 from distutils.version import LooseVersion
4 import numpy
5 import pandas
6 import dateutil
7 import pandas.util._test_decorators as td
8
9
10 def pytest_addoption(parser):
11 parser.addoption("--skip-slow", action="store_true",
12 help="skip slow tests")
13 parser.addoption("--skip-network", action="store_true",
14 help="skip network tests")
15 parser.addoption("--run-high-memory", action="store_true",
16 help="run high memory tests")
17 parser.addoption("--only-slow", action="store_true",
18 help="run only slow tests")
19
20
21 def pytest_runtest_setup(item):
22 if 'slow' in item.keywords and item.config.getoption("--skip-slow"):
23 pytest.skip("skipping due to --skip-slow")
24
25 if 'slow' not in item.keywords and item.config.getoption("--only-slow"):
26 pytest.skip("skipping due to --only-slow")
27
28 if 'network' in item.keywords and item.config.getoption("--skip-network"):
29 pytest.skip("skipping due to --skip-network")
30
31 if 'high_memory' in item.keywords and not item.config.getoption(
32 "--run-high-memory"):
33 pytest.skip(
34 "skipping high memory test since --run-high-memory was not set")
35
36
37 # Configurations for all tests and all test modules
38
39 @pytest.fixture(autouse=True)
40 def configure_tests():
41 pandas.set_option('chained_assignment', 'raise')
42
43
44 # For running doctests: make np and pd names available
45
46 @pytest.fixture(autouse=True)
47 def add_imports(doctest_namespace):
48 doctest_namespace['np'] = numpy
49 doctest_namespace['pd'] = pandas
50
51
52 @pytest.fixture(params=['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'])
53 def spmatrix(request):
54 from scipy import sparse
55 return getattr(sparse, request.param + '_matrix')
56
57
58 @pytest.fixture
59 def ip():
60 """
61 Get an instance of IPython.InteractiveShell.
62
63 Will raise a skip if IPython is not installed.
64 """
65
66 pytest.importorskip('IPython', minversion="6.0.0")
67 from IPython.core.interactiveshell import InteractiveShell
68 return InteractiveShell()
69
70
71 is_dateutil_le_261 = pytest.mark.skipif(
72 LooseVersion(dateutil.__version__) > LooseVersion('2.6.1'),
73 reason="dateutil api change version")
74 is_dateutil_gt_261 = pytest.mark.skipif(
75 LooseVersion(dateutil.__version__) <= LooseVersion('2.6.1'),
76 reason="dateutil stable version")
77
78
79 @pytest.fixture(params=[None, 'gzip', 'bz2', 'zip',
80 pytest.param('xz', marks=td.skip_if_no_lzma)])
81 def compression(request):
82 """
83 Fixture for trying common compression types in compression tests
84 """
85 return request.param
86
87
88 @pytest.fixture(params=[None, 'gzip', 'bz2',
89 pytest.param('xz', marks=td.skip_if_no_lzma)])
90 def compression_no_zip(request):
91 """
92 Fixture for trying common compression types in compression tests
93 except zip
94 """
95 return request.param
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pandas/conftest.py b/pandas/conftest.py
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -93,3 +93,9 @@
except zip
"""
return request.param
+
+
[email protected](scope='module')
+def datetime_tz_utc():
+ from datetime import timezone
+ return timezone.utc
|
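The patch above only adds test infrastructure (a module-scoped fixture returning `datetime.timezone.utc`). As a rough sketch of how such a fixture might be consumed in a regression test for the reported rounding — the test name and assertion are illustrative assumptions, not lines from the pandas test suite:

```python
# Hypothetical regression test built on the datetime_tz_utc fixture added above;
# pytest would discover the fixture from pandas/conftest.py.
import pandas as pd


def test_iter_keeps_nanoseconds_with_tz(datetime_tz_utc):
    index = pd.DatetimeIndex(["2018-02-08 15:00:00.168456358"])
    localized = index.tz_localize(datetime_tz_utc)
    # Iterating should yield the same nanosecond-precision Timestamp as indexing.
    assert next(iter(localized)) == localized[0]
```

On a build affected by the issue the assertion fails, because iteration truncates the tz-aware Timestamp to microseconds.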
{"golden_diff": "diff --git a/pandas/conftest.py b/pandas/conftest.py\n--- a/pandas/conftest.py\n+++ b/pandas/conftest.py\n@@ -93,3 +93,9 @@\n except zip\n \"\"\"\n return request.param\n+\n+\[email protected](scope='module')\n+def datetime_tz_utc():\n+ from datetime import timezone\n+ return timezone.utc\n", "issue": "DateTimeIndex.__iter__().next() rounds time to microseconds, when timezone aware\n#### Code Sample\r\n\r\n```python\r\n>> import pandas as pd\r\n>> datetimeindex = pd.DatetimeIndex([\"2018-02-08 15:00:00.168456358\"])\r\n>> datetimeindex\r\nDatetimeIndex(['2018-02-08 15:00:00.168456358'], dtype='datetime64[ns]', freq=None)\r\n>> datetimeindex = datetimeindex.tz_localize(datetime.timezone.utc)\r\n>> datetimeindex\r\nDatetimeIndex(['2018-02-08 15:00:00.168456358+00:00'], dtype='datetime64[ns, UTC+00:00]', freq=None)\r\n>> datetimeindex.__getitem__(0)\r\nTimestamp('2018-02-08 15:00:00.168456358+0000', tz='UTC+00:00')\r\n>> datetimeindex.__iter__().__next__()\r\nTimestamp('2018-02-08 15:00:00.168456+0000', tz='UTC+00:00')\r\n```\r\n#### Problem description\r\n\r\nWhen using localize DateTimeIndex with nanosecond precision, __getitem__ behavious differs from __iter__().__next__ behaviour, as when iterating thought the DateTimeIndex the date is round to microseconds. This doen not happends if the DatetimeIndex has no timezone.\r\n\r\n#### Output of ``pd.show_versions()``\r\n\r\n<details>\r\n\r\nINSTALLED VERSIONS\r\n------------------\r\ncommit: None\r\npython: 3.4.2.final.0\r\npython-bits: 64\r\nOS: Linux\r\nOS-release: 4.9.0-0.bpo.2-amd64\r\nmachine: x86_64\r\nprocessor: \r\nbyteorder: little\r\nLC_ALL: None\r\nLANG: en_US.UTF-8\r\nLOCALE: en_US.UTF-8\r\n\r\npandas: 0.22.0\r\npytest: None\r\npip: 9.0.1\r\nsetuptools: 36.5.0\r\nCython: None\r\nnumpy: 1.14.0\r\nscipy: 1.0.0\r\npyarrow: None\r\nxarray: None\r\nIPython: 6.2.1\r\nsphinx: None\r\npatsy: None\r\ndateutil: 2.6.1\r\npytz: 2017.3\r\nblosc: None\r\nbottleneck: None\r\ntables: None\r\nnumexpr: None\r\nfeather: None\r\nmatplotlib: 2.0.2\r\nopenpyxl: None\r\nxlrd: None\r\nxlwt: None\r\nxlsxwriter: None\r\nlxml: None\r\nbs4: None\r\nhtml5lib: None\r\nsqlalchemy: None\r\npymysql: None\r\npsycopg2: None\r\njinja2: None\r\ns3fs: None\r\nfastparquet: None\r\npandas_gbq: None\r\npandas_datareader: None\r\n\r\n</details>\r\n\n", "before_files": [{"content": "import pytest\n\nfrom distutils.version import LooseVersion\nimport numpy\nimport pandas\nimport dateutil\nimport pandas.util._test_decorators as td\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--skip-slow\", action=\"store_true\",\n help=\"skip slow tests\")\n parser.addoption(\"--skip-network\", action=\"store_true\",\n help=\"skip network tests\")\n parser.addoption(\"--run-high-memory\", action=\"store_true\",\n help=\"run high memory tests\")\n parser.addoption(\"--only-slow\", action=\"store_true\",\n help=\"run only slow tests\")\n\n\ndef pytest_runtest_setup(item):\n if 'slow' in item.keywords and item.config.getoption(\"--skip-slow\"):\n pytest.skip(\"skipping due to --skip-slow\")\n\n if 'slow' not in item.keywords and item.config.getoption(\"--only-slow\"):\n pytest.skip(\"skipping due to --only-slow\")\n\n if 'network' in item.keywords and item.config.getoption(\"--skip-network\"):\n pytest.skip(\"skipping due to --skip-network\")\n\n if 'high_memory' in item.keywords and not item.config.getoption(\n \"--run-high-memory\"):\n pytest.skip(\n \"skipping high memory test since --run-high-memory was not set\")\n\n\n# Configurations for all tests and all test 
modules\n\[email protected](autouse=True)\ndef configure_tests():\n pandas.set_option('chained_assignment', 'raise')\n\n\n# For running doctests: make np and pd names available\n\[email protected](autouse=True)\ndef add_imports(doctest_namespace):\n doctest_namespace['np'] = numpy\n doctest_namespace['pd'] = pandas\n\n\[email protected](params=['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'])\ndef spmatrix(request):\n from scipy import sparse\n return getattr(sparse, request.param + '_matrix')\n\n\[email protected]\ndef ip():\n \"\"\"\n Get an instance of IPython.InteractiveShell.\n\n Will raise a skip if IPython is not installed.\n \"\"\"\n\n pytest.importorskip('IPython', minversion=\"6.0.0\")\n from IPython.core.interactiveshell import InteractiveShell\n return InteractiveShell()\n\n\nis_dateutil_le_261 = pytest.mark.skipif(\n LooseVersion(dateutil.__version__) > LooseVersion('2.6.1'),\n reason=\"dateutil api change version\")\nis_dateutil_gt_261 = pytest.mark.skipif(\n LooseVersion(dateutil.__version__) <= LooseVersion('2.6.1'),\n reason=\"dateutil stable version\")\n\n\[email protected](params=[None, 'gzip', 'bz2', 'zip',\n pytest.param('xz', marks=td.skip_if_no_lzma)])\ndef compression(request):\n \"\"\"\n Fixture for trying common compression types in compression tests\n \"\"\"\n return request.param\n\n\[email protected](params=[None, 'gzip', 'bz2',\n pytest.param('xz', marks=td.skip_if_no_lzma)])\ndef compression_no_zip(request):\n \"\"\"\n Fixture for trying common compression types in compression tests\n except zip\n \"\"\"\n return request.param\n", "path": "pandas/conftest.py"}], "after_files": [{"content": "import pytest\n\nfrom distutils.version import LooseVersion\nimport numpy\nimport pandas\nimport dateutil\nimport pandas.util._test_decorators as td\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--skip-slow\", action=\"store_true\",\n help=\"skip slow tests\")\n parser.addoption(\"--skip-network\", action=\"store_true\",\n help=\"skip network tests\")\n parser.addoption(\"--run-high-memory\", action=\"store_true\",\n help=\"run high memory tests\")\n parser.addoption(\"--only-slow\", action=\"store_true\",\n help=\"run only slow tests\")\n\n\ndef pytest_runtest_setup(item):\n if 'slow' in item.keywords and item.config.getoption(\"--skip-slow\"):\n pytest.skip(\"skipping due to --skip-slow\")\n\n if 'slow' not in item.keywords and item.config.getoption(\"--only-slow\"):\n pytest.skip(\"skipping due to --only-slow\")\n\n if 'network' in item.keywords and item.config.getoption(\"--skip-network\"):\n pytest.skip(\"skipping due to --skip-network\")\n\n if 'high_memory' in item.keywords and not item.config.getoption(\n \"--run-high-memory\"):\n pytest.skip(\n \"skipping high memory test since --run-high-memory was not set\")\n\n\n# Configurations for all tests and all test modules\n\[email protected](autouse=True)\ndef configure_tests():\n pandas.set_option('chained_assignment', 'raise')\n\n\n# For running doctests: make np and pd names available\n\[email protected](autouse=True)\ndef add_imports(doctest_namespace):\n doctest_namespace['np'] = numpy\n doctest_namespace['pd'] = pandas\n\n\[email protected](params=['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'])\ndef spmatrix(request):\n from scipy import sparse\n return getattr(sparse, request.param + '_matrix')\n\n\[email protected]\ndef ip():\n \"\"\"\n Get an instance of IPython.InteractiveShell.\n\n Will raise a skip if IPython is not installed.\n \"\"\"\n\n pytest.importorskip('IPython', minversion=\"6.0.0\")\n 
from IPython.core.interactiveshell import InteractiveShell\n return InteractiveShell()\n\n\nis_dateutil_le_261 = pytest.mark.skipif(\n LooseVersion(dateutil.__version__) > LooseVersion('2.6.1'),\n reason=\"dateutil api change version\")\nis_dateutil_gt_261 = pytest.mark.skipif(\n LooseVersion(dateutil.__version__) <= LooseVersion('2.6.1'),\n reason=\"dateutil stable version\")\n\n\[email protected](params=[None, 'gzip', 'bz2', 'zip',\n pytest.param('xz', marks=td.skip_if_no_lzma)])\ndef compression(request):\n \"\"\"\n Fixture for trying common compression types in compression tests\n \"\"\"\n return request.param\n\n\[email protected](params=[None, 'gzip', 'bz2',\n pytest.param('xz', marks=td.skip_if_no_lzma)])\ndef compression_no_zip(request):\n \"\"\"\n Fixture for trying common compression types in compression tests\n except zip\n \"\"\"\n return request.param\n\n\[email protected](scope='module')\ndef datetime_tz_utc():\n from datetime import timezone\n return timezone.utc\n", "path": "pandas/conftest.py"}]}
| 1,847 | 89 |
gh_patches_debug_25772 | rasdani/github-patches | git_diff | fossasia__open-event-server-8368 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Prefill customer_email in stripe checkout page
Self-explanatory.
Parent issue: #8146
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/helpers/payment.py`
Content:
```
1 import json
2
3 import omise
4 import paypalrestsdk
5 import requests
6 import stripe
7 from forex_python.converter import CurrencyRates
8
9 from app.api.helpers import checksum
10 from app.api.helpers.cache import cache
11 from app.api.helpers.db import safe_query, save_to_db
12 from app.api.helpers.errors import ConflictError, ForbiddenError
13 from app.api.helpers.utilities import represents_int, round_money
14 from app.models.order import Order
15 from app.models.stripe_authorization import StripeAuthorization
16 from app.settings import Environment, get_settings
17
18
19 @cache.memoize(5)
20 def forex(from_currency, to_currency, amount):
21 try:
22 currency_rates = CurrencyRates()
23 return currency_rates.convert(from_currency, to_currency, amount)
24 except:
25 return amount
26
27
28 class StripePaymentsManager:
29 """
30 Class to manage payments through Stripe.
31 """
32
33 @staticmethod
34 def get_credentials(event=None):
35 """
36 If the event parameter is None, It returns the secret and publishable key of the Admin's Stripe account.
37 Else, it returns the corresponding values for the event organizer's account.
38 :param event:
39 :return: Stripe secret and publishable keys.
40 """
41 if not event:
42 settings = get_settings()
43 if (
44 settings['app_environment'] == 'development'
45 and settings['stripe_test_secret_key']
46 and settings['stripe_test_publishable_key']
47 ):
48 return {
49 'SECRET_KEY': settings['stripe_test_secret_key'],
50 'PUBLISHABLE_KEY': settings["stripe_test_publishable_key"],
51 }
52 if settings['stripe_secret_key'] and settings["stripe_publishable_key"]:
53 return {
54 'SECRET_KEY': settings['stripe_secret_key'],
55 'PUBLISHABLE_KEY': settings["stripe_publishable_key"],
56 }
57 return None
58 if represents_int(event):
59 authorization = StripeAuthorization.query.filter_by(event_id=event).first()
60 else:
61 authorization = event.stripe_authorization
62 if authorization:
63 return {
64 'SECRET_KEY': authorization.stripe_secret_key,
65 'PUBLISHABLE_KEY': authorization.stripe_publishable_key,
66 }
67 return None
68
69 @staticmethod
70 def get_event_organizer_credentials_from_stripe(stripe_auth_code):
71 """
72 Uses the stripe_auth_code to get the other credentials for the event organizer's stripe account
73 :param stripe_auth_code: stripe authorization code
74 :return: response from stripe
75 """
76 credentials = StripePaymentsManager.get_credentials()
77
78 if not credentials:
79 raise ForbiddenError(
80 {'pointer': ''},
81 "Stripe payment isn't configured properly for the Platform",
82 )
83
84 data = {
85 'client_secret': credentials['SECRET_KEY'],
86 'code': stripe_auth_code,
87 'grant_type': 'authorization_code',
88 }
89
90 response = requests.post('https://connect.stripe.com/oauth/token', data=data)
91 return json.loads(response.text)
92
93 @staticmethod
94 def capture_payment(order_invoice, currency=None, credentials=None):
95 """
96 Capture payments through stripe.
97 :param order_invoice: Order to be charged for
98 :param currency: Currency of the order amount.
99 :param credentials: Stripe credentials.
100 :return: charge/None depending on success/failure.
101 """
102 if not credentials:
103 credentials = StripePaymentsManager.get_credentials(order_invoice.event)
104
105 if not credentials:
106 raise ConflictError(
107 {'pointer': ''}, 'Stripe credentials not found for the event.'
108 )
109 stripe.api_key = credentials['SECRET_KEY']
110 if not currency:
111 currency = order_invoice.event.payment_currency
112
113 if not currency or currency == "":
114 currency = "USD"
115
116 frontend_url = get_settings()['frontend_url']
117
118 try:
119 # payment_method_types = ['card', 'klarna']
120 payment_method_types = ['card']
121 # if currency.lower() == 'eur':
122 # payment_method_types.append('sepa_debit')
123
124 session = stripe.checkout.Session.create(
125 payment_method_types= payment_method_types,
126 line_items=[{
127 'price_data': {
128 'currency': currency.lower(),
129 'product_data': {
130 'name': order_invoice.event.name,
131 },
132 'unit_amount': int(order_invoice.amount * 100),
133 },
134 'quantity': 1,
135 }],
136 mode='payment',
137 success_url=f"{frontend_url}/orders/{order_invoice.identifier}/view",
138 cancel_url=f"{frontend_url}/orders/{order_invoice.identifier}/view",
139 )
140
141 return session
142
143 except Exception as e:
144 raise ConflictError({'pointer': ''}, str(e))
145
146 @staticmethod
147 def retrieve_session(event_id, stripe_session_id):
148 credentials = StripePaymentsManager.get_credentials(event_id)
149
150 if not credentials:
151 raise ConflictError(
152 {'pointer': ''}, 'Stripe credentials not found for the event.'
153 )
154 stripe.api_key = credentials['SECRET_KEY']
155 session = stripe.checkout.Session.retrieve(stripe_session_id)
156
157 return session
158
159 @staticmethod
160 def retrieve_payment_intent(event_id, payment_intent_id):
161 credentials = StripePaymentsManager.get_credentials(event_id)
162
163 if not credentials:
164 raise ConflictError(
165 {'pointer': ''}, 'Stripe credentials not found for the event.'
166 )
167 stripe.api_key = credentials['SECRET_KEY']
168 payment_intent = stripe.PaymentIntent.retrieve(payment_intent_id)
169
170 return payment_intent
171
172
173 class PayPalPaymentsManager:
174 """
175 Class to manage payment through Paypal REST API.
176 """
177
178 @staticmethod
179 def configure_paypal():
180 """
181 Configure the paypal sdk
182 :return: Credentials
183 """
184 settings = get_settings()
185 # Use Sandbox by default.
186 paypal_mode = settings.get(
187 'paypal_mode',
188 'live'
189 if (settings['app_environment'] == Environment.PRODUCTION)
190 else 'sandbox',
191 )
192 paypal_key = None
193 if paypal_mode == 'sandbox':
194 paypal_key = 'paypal_sandbox'
195 elif paypal_mode == 'live':
196 paypal_key = 'paypal'
197
198 if not paypal_key:
199 raise ConflictError(
200 {'pointer': ''}, "Paypal Mode must be 'live' or 'sandbox'"
201 )
202
203 paypal_client = settings.get(f'{paypal_key}_client', None)
204 paypal_secret = settings.get(f'{paypal_key}_secret', None)
205
206 if not paypal_client or not paypal_secret:
207 raise ConflictError(
208 {'pointer': ''},
209 "Payments through Paypal have not been configured on the platform",
210 )
211 return paypalrestsdk.configure(
212 {
213 "mode": paypal_mode,
214 "client_id": paypal_client,
215 "client_secret": paypal_secret,
216 }
217 )
218
219 @staticmethod
220 def create_payment(order, return_url, cancel_url, payee_email=None):
221 """
222 Create payment for an order
223 :param order: Order to create payment for.
224 :param return_url: return url for the payment.
225 :param cancel_url: cancel_url for the payment.
226 :param payee_email: email of the payee. Default to event paypal email if not set
227 :return: request_id or the error message along with an indicator.
228 """
229 payee_email = payee_email or order.event.paypal_email
230 if not payee_email:
231 raise ConflictError(
232 {'pointer': ''},
233 "Payments through Paypal hasn't been configured for the billing",
234 )
235
236 PayPalPaymentsManager.configure_paypal()
237
238 payment = paypalrestsdk.Payment(
239 {
240 "intent": "sale",
241 "payer": {"payment_method": "paypal"},
242 "redirect_urls": {"return_url": return_url, "cancel_url": cancel_url},
243 "transactions": [
244 {
245 "amount": {
246 "total": float(round_money(order.amount)),
247 "currency": order.event.payment_currency,
248 },
249 "payee": {"email": payee_email},
250 }
251 ],
252 }
253 )
254
255 if payment.create():
256 return True, payment.id
257 return False, payment.error
258
259 @staticmethod
260 def verify_payment(payment_id, order):
261 """
262 Verify Paypal payment one more time for paying with Paypal in mobile client
263 """
264 PayPalPaymentsManager.configure_paypal()
265 try:
266 payment_server = paypalrestsdk.Payment.find(payment_id)
267 if payment_server.state != 'approved':
268 return (
269 False,
270 'Payment has not been approved yet. Status is '
271 + payment_server.state
272 + '.',
273 )
274
275 # Get the most recent transaction
276 transaction = payment_server.transactions[0]
277 amount_server = transaction.amount.total
278 currency_server = transaction.amount.currency
279 sale_state = transaction.related_resources[0].sale.state
280
281 if float(amount_server) != order.amount:
282 return False, 'Payment amount does not match order'
283 if currency_server != order.event.payment_currency:
284 return False, 'Payment currency does not match order'
285 if sale_state != 'completed':
286 return False, 'Sale not completed'
287 if PayPalPaymentsManager.used_payment(payment_id, order):
288 return False, 'Payment already been verified'
289 return True, None
290 except paypalrestsdk.ResourceNotFound:
291 return False, 'Payment Not Found'
292
293 @staticmethod
294 def used_payment(payment_id, order):
295 """
296 Function to check for recycling of payment IDs
297 """
298 if Order.query.filter(Order.paypal_token == payment_id).first() is None:
299 order.paypal_token = payment_id
300 save_to_db(order)
301 return False
302 return True
303
304 @staticmethod
305 def execute_payment(paypal_payer_id, paypal_payment_id):
306 """
307 Execute payemnt and charge the user.
308 :param paypal_payment_id: payment_id
309 :param paypal_payer_id: payer_id
310 :return: Result of the transaction.
311 """
312 PayPalPaymentsManager.configure_paypal()
313 payment = paypalrestsdk.Payment.find(paypal_payment_id)
314
315 if payment.execute({"payer_id": paypal_payer_id}):
316 return True, 'Successfully Executed'
317 return False, payment.error
318
319
320 class AliPayPaymentsManager:
321 """
322 Class to manage AliPay Payments
323 """
324
325 @staticmethod
326 def create_source(amount, currency, redirect_return_uri):
327 stripe.api_key = get_settings()['alipay_publishable_key']
328 response = stripe.Source.create(
329 type='alipay',
330 currency=currency,
331 amount=amount,
332 redirect={'return_url': redirect_return_uri},
333 )
334 return response
335
336 @staticmethod
337 def charge_source(order_identifier):
338 order = safe_query(Order, 'identifier', order_identifier, 'identifier')
339 stripe.api_key = get_settings()['alipay_secret_key']
340 charge = stripe.Charge.create(
341 amount=int(order.amount),
342 currency=order.event.payment_currency,
343 source=order.order_notes,
344 )
345 return charge
346
347
348 class OmisePaymentsManager:
349 """
350 Class to manage Omise Payments
351 """
352
353 @staticmethod
354 def charge_payment(order_identifier, token):
355 if get_settings()['app_environment'] == Environment.PRODUCTION:
356 omise.api_secret = get_settings()['omise_test_secret']
357 omise.api_public = get_settings()['omise_test_public']
358 else:
359 omise.api_secret = get_settings()['omise_test_secret']
360 omise.api_public = get_settings()['omise_test_public']
361 order = safe_query(Order, 'identifier', order_identifier, 'identifier')
362 charge = omise.Charge.create(
363 amount=int(round(order.amount)),
364 currency=order.event.payment_currency,
365 card=token,
366 metadata={"order_id": str(order_identifier), "status": True},
367 )
368 return charge
369
370
371 class PaytmPaymentsManager:
372 """
373 Class to manage PayTM payments
374 """
375
376 @property
377 def paytm_endpoint(self):
378 if get_settings()['paytm_mode'] == 'test':
379 url = "https://securegw-stage.paytm.in/theia/api/v1/"
380 else:
381 url = "https://securegw.paytm.in/theia/api/v1/"
382 return url
383
384 @staticmethod
385 def generate_checksum(paytm_params):
386 if get_settings()['paytm_mode'] == 'test':
387 merchant_key = get_settings()['paytm_sandbox_secret']
388 else:
389 merchant_key = get_settings()['paytm_live_secret']
390 return checksum.generate_checksum_by_str(
391 json.dumps(paytm_params["body"]), merchant_key
392 )
393
394 @staticmethod
395 def hit_paytm_endpoint(url, head, body=None):
396 paytm_params = {}
397 paytm_params["body"] = body
398 paytm_params["head"] = head
399 post_data = json.dumps(paytm_params)
400 response = requests.post(
401 url, data=post_data, headers={"Content-type": "application/json"}
402 ).json()
403 return response
404
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/api/helpers/payment.py b/app/api/helpers/payment.py
--- a/app/api/helpers/payment.py
+++ b/app/api/helpers/payment.py
@@ -122,17 +122,20 @@
# payment_method_types.append('sepa_debit')
session = stripe.checkout.Session.create(
- payment_method_types= payment_method_types,
- line_items=[{
- 'price_data': {
- 'currency': currency.lower(),
- 'product_data': {
- 'name': order_invoice.event.name,
+ customer_email=order_invoice.user.email,
+ payment_method_types=payment_method_types,
+ line_items=[
+ {
+ 'price_data': {
+ 'currency': currency.lower(),
+ 'product_data': {
+ 'name': order_invoice.event.name,
+ },
+ 'unit_amount': int(order_invoice.amount * 100),
},
- 'unit_amount': int(order_invoice.amount * 100),
- },
- 'quantity': 1,
- }],
+ 'quantity': 1,
+ }
+ ],
mode='payment',
success_url=f"{frontend_url}/orders/{order_invoice.identifier}/view",
cancel_url=f"{frontend_url}/orders/{order_invoice.identifier}/view",
|
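For reference, a minimal sketch of the call shape this patch produces. `customer_email` is an existing parameter of Stripe's Checkout Session API that prefills the email field on the hosted page; the API key, email, amount and URLs below are placeholders rather than values from the project:

```python
import stripe

stripe.api_key = "sk_test_placeholder"  # placeholder secret key

session = stripe.checkout.Session.create(
    customer_email="attendee@example.com",  # prefilled on the hosted checkout page
    payment_method_types=["card"],
    line_items=[
        {
            "price_data": {
                "currency": "usd",
                "product_data": {"name": "Sample Event"},
                "unit_amount": 2500,  # amount in the smallest currency unit (cents)
            },
            "quantity": 1,
        }
    ],
    mode="payment",
    success_url="https://example.com/orders/ORDER_ID/view",
    cancel_url="https://example.com/orders/ORDER_ID/view",
)
print(session.id)  # the frontend redirects the buyer to this checkout session
```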
{"golden_diff": "diff --git a/app/api/helpers/payment.py b/app/api/helpers/payment.py\n--- a/app/api/helpers/payment.py\n+++ b/app/api/helpers/payment.py\n@@ -122,17 +122,20 @@\n # payment_method_types.append('sepa_debit')\n \n session = stripe.checkout.Session.create(\n- payment_method_types= payment_method_types,\n- line_items=[{\n- 'price_data': {\n- 'currency': currency.lower(),\n- 'product_data': {\n- 'name': order_invoice.event.name,\n+ customer_email=order_invoice.user.email,\n+ payment_method_types=payment_method_types,\n+ line_items=[\n+ {\n+ 'price_data': {\n+ 'currency': currency.lower(),\n+ 'product_data': {\n+ 'name': order_invoice.event.name,\n+ },\n+ 'unit_amount': int(order_invoice.amount * 100),\n },\n- 'unit_amount': int(order_invoice.amount * 100),\n- },\n- 'quantity': 1,\n- }],\n+ 'quantity': 1,\n+ }\n+ ],\n mode='payment',\n success_url=f\"{frontend_url}/orders/{order_invoice.identifier}/view\",\n cancel_url=f\"{frontend_url}/orders/{order_invoice.identifier}/view\",\n", "issue": "Prefill customer_email in stripe checkout page\nSelf explainable.\r\n\r\nParent issue: #8146 \n", "before_files": [{"content": "import json\n\nimport omise\nimport paypalrestsdk\nimport requests\nimport stripe\nfrom forex_python.converter import CurrencyRates\n\nfrom app.api.helpers import checksum\nfrom app.api.helpers.cache import cache\nfrom app.api.helpers.db import safe_query, save_to_db\nfrom app.api.helpers.errors import ConflictError, ForbiddenError\nfrom app.api.helpers.utilities import represents_int, round_money\nfrom app.models.order import Order\nfrom app.models.stripe_authorization import StripeAuthorization\nfrom app.settings import Environment, get_settings\n\n\[email protected](5)\ndef forex(from_currency, to_currency, amount):\n try:\n currency_rates = CurrencyRates()\n return currency_rates.convert(from_currency, to_currency, amount)\n except:\n return amount\n\n\nclass StripePaymentsManager:\n \"\"\"\n Class to manage payments through Stripe.\n \"\"\"\n\n @staticmethod\n def get_credentials(event=None):\n \"\"\"\n If the event parameter is None, It returns the secret and publishable key of the Admin's Stripe account.\n Else, it returns the corresponding values for the event organizer's account.\n :param event:\n :return: Stripe secret and publishable keys.\n \"\"\"\n if not event:\n settings = get_settings()\n if (\n settings['app_environment'] == 'development'\n and settings['stripe_test_secret_key']\n and settings['stripe_test_publishable_key']\n ):\n return {\n 'SECRET_KEY': settings['stripe_test_secret_key'],\n 'PUBLISHABLE_KEY': settings[\"stripe_test_publishable_key\"],\n }\n if settings['stripe_secret_key'] and settings[\"stripe_publishable_key\"]:\n return {\n 'SECRET_KEY': settings['stripe_secret_key'],\n 'PUBLISHABLE_KEY': settings[\"stripe_publishable_key\"],\n }\n return None\n if represents_int(event):\n authorization = StripeAuthorization.query.filter_by(event_id=event).first()\n else:\n authorization = event.stripe_authorization\n if authorization:\n return {\n 'SECRET_KEY': authorization.stripe_secret_key,\n 'PUBLISHABLE_KEY': authorization.stripe_publishable_key,\n }\n return None\n\n @staticmethod\n def get_event_organizer_credentials_from_stripe(stripe_auth_code):\n \"\"\"\n Uses the stripe_auth_code to get the other credentials for the event organizer's stripe account\n :param stripe_auth_code: stripe authorization code\n :return: response from stripe\n \"\"\"\n credentials = StripePaymentsManager.get_credentials()\n\n if not credentials:\n raise 
ForbiddenError(\n {'pointer': ''},\n \"Stripe payment isn't configured properly for the Platform\",\n )\n\n data = {\n 'client_secret': credentials['SECRET_KEY'],\n 'code': stripe_auth_code,\n 'grant_type': 'authorization_code',\n }\n\n response = requests.post('https://connect.stripe.com/oauth/token', data=data)\n return json.loads(response.text)\n\n @staticmethod\n def capture_payment(order_invoice, currency=None, credentials=None):\n \"\"\"\n Capture payments through stripe.\n :param order_invoice: Order to be charged for\n :param currency: Currency of the order amount.\n :param credentials: Stripe credentials.\n :return: charge/None depending on success/failure.\n \"\"\"\n if not credentials:\n credentials = StripePaymentsManager.get_credentials(order_invoice.event)\n\n if not credentials:\n raise ConflictError(\n {'pointer': ''}, 'Stripe credentials not found for the event.'\n )\n stripe.api_key = credentials['SECRET_KEY']\n if not currency:\n currency = order_invoice.event.payment_currency\n\n if not currency or currency == \"\":\n currency = \"USD\"\n\n frontend_url = get_settings()['frontend_url']\n\n try:\n # payment_method_types = ['card', 'klarna']\n payment_method_types = ['card']\n # if currency.lower() == 'eur':\n # payment_method_types.append('sepa_debit')\n\n session = stripe.checkout.Session.create(\n payment_method_types= payment_method_types,\n line_items=[{\n 'price_data': {\n 'currency': currency.lower(),\n 'product_data': {\n 'name': order_invoice.event.name,\n },\n 'unit_amount': int(order_invoice.amount * 100),\n },\n 'quantity': 1,\n }],\n mode='payment',\n success_url=f\"{frontend_url}/orders/{order_invoice.identifier}/view\",\n cancel_url=f\"{frontend_url}/orders/{order_invoice.identifier}/view\",\n )\n\n return session\n\n except Exception as e:\n raise ConflictError({'pointer': ''}, str(e))\n\n @staticmethod\n def retrieve_session(event_id, stripe_session_id):\n credentials = StripePaymentsManager.get_credentials(event_id)\n\n if not credentials:\n raise ConflictError(\n {'pointer': ''}, 'Stripe credentials not found for the event.'\n )\n stripe.api_key = credentials['SECRET_KEY']\n session = stripe.checkout.Session.retrieve(stripe_session_id)\n\n return session\n\n @staticmethod\n def retrieve_payment_intent(event_id, payment_intent_id):\n credentials = StripePaymentsManager.get_credentials(event_id)\n\n if not credentials:\n raise ConflictError(\n {'pointer': ''}, 'Stripe credentials not found for the event.'\n )\n stripe.api_key = credentials['SECRET_KEY']\n payment_intent = stripe.PaymentIntent.retrieve(payment_intent_id)\n\n return payment_intent\n\n\nclass PayPalPaymentsManager:\n \"\"\"\n Class to manage payment through Paypal REST API.\n \"\"\"\n\n @staticmethod\n def configure_paypal():\n \"\"\"\n Configure the paypal sdk\n :return: Credentials\n \"\"\"\n settings = get_settings()\n # Use Sandbox by default.\n paypal_mode = settings.get(\n 'paypal_mode',\n 'live'\n if (settings['app_environment'] == Environment.PRODUCTION)\n else 'sandbox',\n )\n paypal_key = None\n if paypal_mode == 'sandbox':\n paypal_key = 'paypal_sandbox'\n elif paypal_mode == 'live':\n paypal_key = 'paypal'\n\n if not paypal_key:\n raise ConflictError(\n {'pointer': ''}, \"Paypal Mode must be 'live' or 'sandbox'\"\n )\n\n paypal_client = settings.get(f'{paypal_key}_client', None)\n paypal_secret = settings.get(f'{paypal_key}_secret', None)\n\n if not paypal_client or not paypal_secret:\n raise ConflictError(\n {'pointer': ''},\n \"Payments through Paypal have not been configured 
on the platform\",\n )\n return paypalrestsdk.configure(\n {\n \"mode\": paypal_mode,\n \"client_id\": paypal_client,\n \"client_secret\": paypal_secret,\n }\n )\n\n @staticmethod\n def create_payment(order, return_url, cancel_url, payee_email=None):\n \"\"\"\n Create payment for an order\n :param order: Order to create payment for.\n :param return_url: return url for the payment.\n :param cancel_url: cancel_url for the payment.\n :param payee_email: email of the payee. Default to event paypal email if not set\n :return: request_id or the error message along with an indicator.\n \"\"\"\n payee_email = payee_email or order.event.paypal_email\n if not payee_email:\n raise ConflictError(\n {'pointer': ''},\n \"Payments through Paypal hasn't been configured for the billing\",\n )\n\n PayPalPaymentsManager.configure_paypal()\n\n payment = paypalrestsdk.Payment(\n {\n \"intent\": \"sale\",\n \"payer\": {\"payment_method\": \"paypal\"},\n \"redirect_urls\": {\"return_url\": return_url, \"cancel_url\": cancel_url},\n \"transactions\": [\n {\n \"amount\": {\n \"total\": float(round_money(order.amount)),\n \"currency\": order.event.payment_currency,\n },\n \"payee\": {\"email\": payee_email},\n }\n ],\n }\n )\n\n if payment.create():\n return True, payment.id\n return False, payment.error\n\n @staticmethod\n def verify_payment(payment_id, order):\n \"\"\"\n Verify Paypal payment one more time for paying with Paypal in mobile client\n \"\"\"\n PayPalPaymentsManager.configure_paypal()\n try:\n payment_server = paypalrestsdk.Payment.find(payment_id)\n if payment_server.state != 'approved':\n return (\n False,\n 'Payment has not been approved yet. Status is '\n + payment_server.state\n + '.',\n )\n\n # Get the most recent transaction\n transaction = payment_server.transactions[0]\n amount_server = transaction.amount.total\n currency_server = transaction.amount.currency\n sale_state = transaction.related_resources[0].sale.state\n\n if float(amount_server) != order.amount:\n return False, 'Payment amount does not match order'\n if currency_server != order.event.payment_currency:\n return False, 'Payment currency does not match order'\n if sale_state != 'completed':\n return False, 'Sale not completed'\n if PayPalPaymentsManager.used_payment(payment_id, order):\n return False, 'Payment already been verified'\n return True, None\n except paypalrestsdk.ResourceNotFound:\n return False, 'Payment Not Found'\n\n @staticmethod\n def used_payment(payment_id, order):\n \"\"\"\n Function to check for recycling of payment IDs\n \"\"\"\n if Order.query.filter(Order.paypal_token == payment_id).first() is None:\n order.paypal_token = payment_id\n save_to_db(order)\n return False\n return True\n\n @staticmethod\n def execute_payment(paypal_payer_id, paypal_payment_id):\n \"\"\"\n Execute payemnt and charge the user.\n :param paypal_payment_id: payment_id\n :param paypal_payer_id: payer_id\n :return: Result of the transaction.\n \"\"\"\n PayPalPaymentsManager.configure_paypal()\n payment = paypalrestsdk.Payment.find(paypal_payment_id)\n\n if payment.execute({\"payer_id\": paypal_payer_id}):\n return True, 'Successfully Executed'\n return False, payment.error\n\n\nclass AliPayPaymentsManager:\n \"\"\"\n Class to manage AliPay Payments\n \"\"\"\n\n @staticmethod\n def create_source(amount, currency, redirect_return_uri):\n stripe.api_key = get_settings()['alipay_publishable_key']\n response = stripe.Source.create(\n type='alipay',\n currency=currency,\n amount=amount,\n redirect={'return_url': redirect_return_uri},\n )\n 
return response\n\n @staticmethod\n def charge_source(order_identifier):\n order = safe_query(Order, 'identifier', order_identifier, 'identifier')\n stripe.api_key = get_settings()['alipay_secret_key']\n charge = stripe.Charge.create(\n amount=int(order.amount),\n currency=order.event.payment_currency,\n source=order.order_notes,\n )\n return charge\n\n\nclass OmisePaymentsManager:\n \"\"\"\n Class to manage Omise Payments\n \"\"\"\n\n @staticmethod\n def charge_payment(order_identifier, token):\n if get_settings()['app_environment'] == Environment.PRODUCTION:\n omise.api_secret = get_settings()['omise_test_secret']\n omise.api_public = get_settings()['omise_test_public']\n else:\n omise.api_secret = get_settings()['omise_test_secret']\n omise.api_public = get_settings()['omise_test_public']\n order = safe_query(Order, 'identifier', order_identifier, 'identifier')\n charge = omise.Charge.create(\n amount=int(round(order.amount)),\n currency=order.event.payment_currency,\n card=token,\n metadata={\"order_id\": str(order_identifier), \"status\": True},\n )\n return charge\n\n\nclass PaytmPaymentsManager:\n \"\"\"\n Class to manage PayTM payments\n \"\"\"\n\n @property\n def paytm_endpoint(self):\n if get_settings()['paytm_mode'] == 'test':\n url = \"https://securegw-stage.paytm.in/theia/api/v1/\"\n else:\n url = \"https://securegw.paytm.in/theia/api/v1/\"\n return url\n\n @staticmethod\n def generate_checksum(paytm_params):\n if get_settings()['paytm_mode'] == 'test':\n merchant_key = get_settings()['paytm_sandbox_secret']\n else:\n merchant_key = get_settings()['paytm_live_secret']\n return checksum.generate_checksum_by_str(\n json.dumps(paytm_params[\"body\"]), merchant_key\n )\n\n @staticmethod\n def hit_paytm_endpoint(url, head, body=None):\n paytm_params = {}\n paytm_params[\"body\"] = body\n paytm_params[\"head\"] = head\n post_data = json.dumps(paytm_params)\n response = requests.post(\n url, data=post_data, headers={\"Content-type\": \"application/json\"}\n ).json()\n return response\n", "path": "app/api/helpers/payment.py"}], "after_files": [{"content": "import json\n\nimport omise\nimport paypalrestsdk\nimport requests\nimport stripe\nfrom forex_python.converter import CurrencyRates\n\nfrom app.api.helpers import checksum\nfrom app.api.helpers.cache import cache\nfrom app.api.helpers.db import safe_query, save_to_db\nfrom app.api.helpers.errors import ConflictError, ForbiddenError\nfrom app.api.helpers.utilities import represents_int, round_money\nfrom app.models.order import Order\nfrom app.models.stripe_authorization import StripeAuthorization\nfrom app.settings import Environment, get_settings\n\n\[email protected](5)\ndef forex(from_currency, to_currency, amount):\n try:\n currency_rates = CurrencyRates()\n return currency_rates.convert(from_currency, to_currency, amount)\n except:\n return amount\n\n\nclass StripePaymentsManager:\n \"\"\"\n Class to manage payments through Stripe.\n \"\"\"\n\n @staticmethod\n def get_credentials(event=None):\n \"\"\"\n If the event parameter is None, It returns the secret and publishable key of the Admin's Stripe account.\n Else, it returns the corresponding values for the event organizer's account.\n :param event:\n :return: Stripe secret and publishable keys.\n \"\"\"\n if not event:\n settings = get_settings()\n if (\n settings['app_environment'] == 'development'\n and settings['stripe_test_secret_key']\n and settings['stripe_test_publishable_key']\n ):\n return {\n 'SECRET_KEY': settings['stripe_test_secret_key'],\n 'PUBLISHABLE_KEY': 
settings[\"stripe_test_publishable_key\"],\n }\n if settings['stripe_secret_key'] and settings[\"stripe_publishable_key\"]:\n return {\n 'SECRET_KEY': settings['stripe_secret_key'],\n 'PUBLISHABLE_KEY': settings[\"stripe_publishable_key\"],\n }\n return None\n if represents_int(event):\n authorization = StripeAuthorization.query.filter_by(event_id=event).first()\n else:\n authorization = event.stripe_authorization\n if authorization:\n return {\n 'SECRET_KEY': authorization.stripe_secret_key,\n 'PUBLISHABLE_KEY': authorization.stripe_publishable_key,\n }\n return None\n\n @staticmethod\n def get_event_organizer_credentials_from_stripe(stripe_auth_code):\n \"\"\"\n Uses the stripe_auth_code to get the other credentials for the event organizer's stripe account\n :param stripe_auth_code: stripe authorization code\n :return: response from stripe\n \"\"\"\n credentials = StripePaymentsManager.get_credentials()\n\n if not credentials:\n raise ForbiddenError(\n {'pointer': ''},\n \"Stripe payment isn't configured properly for the Platform\",\n )\n\n data = {\n 'client_secret': credentials['SECRET_KEY'],\n 'code': stripe_auth_code,\n 'grant_type': 'authorization_code',\n }\n\n response = requests.post('https://connect.stripe.com/oauth/token', data=data)\n return json.loads(response.text)\n\n @staticmethod\n def capture_payment(order_invoice, currency=None, credentials=None):\n \"\"\"\n Capture payments through stripe.\n :param order_invoice: Order to be charged for\n :param currency: Currency of the order amount.\n :param credentials: Stripe credentials.\n :return: charge/None depending on success/failure.\n \"\"\"\n if not credentials:\n credentials = StripePaymentsManager.get_credentials(order_invoice.event)\n\n if not credentials:\n raise ConflictError(\n {'pointer': ''}, 'Stripe credentials not found for the event.'\n )\n stripe.api_key = credentials['SECRET_KEY']\n if not currency:\n currency = order_invoice.event.payment_currency\n\n if not currency or currency == \"\":\n currency = \"USD\"\n\n frontend_url = get_settings()['frontend_url']\n\n try:\n # payment_method_types = ['card', 'klarna']\n payment_method_types = ['card']\n # if currency.lower() == 'eur':\n # payment_method_types.append('sepa_debit')\n\n session = stripe.checkout.Session.create(\n customer_email=order_invoice.user.email,\n payment_method_types=payment_method_types,\n line_items=[\n {\n 'price_data': {\n 'currency': currency.lower(),\n 'product_data': {\n 'name': order_invoice.event.name,\n },\n 'unit_amount': int(order_invoice.amount * 100),\n },\n 'quantity': 1,\n }\n ],\n mode='payment',\n success_url=f\"{frontend_url}/orders/{order_invoice.identifier}/view\",\n cancel_url=f\"{frontend_url}/orders/{order_invoice.identifier}/view\",\n )\n\n return session\n\n except Exception as e:\n raise ConflictError({'pointer': ''}, str(e))\n\n @staticmethod\n def retrieve_session(event_id, stripe_session_id):\n credentials = StripePaymentsManager.get_credentials(event_id)\n\n if not credentials:\n raise ConflictError(\n {'pointer': ''}, 'Stripe credentials not found for the event.'\n )\n stripe.api_key = credentials['SECRET_KEY']\n session = stripe.checkout.Session.retrieve(stripe_session_id)\n\n return session\n\n @staticmethod\n def retrieve_payment_intent(event_id, payment_intent_id):\n credentials = StripePaymentsManager.get_credentials(event_id)\n\n if not credentials:\n raise ConflictError(\n {'pointer': ''}, 'Stripe credentials not found for the event.'\n )\n stripe.api_key = credentials['SECRET_KEY']\n payment_intent = 
stripe.PaymentIntent.retrieve(payment_intent_id)\n\n return payment_intent\n\n\nclass PayPalPaymentsManager:\n \"\"\"\n Class to manage payment through Paypal REST API.\n \"\"\"\n\n @staticmethod\n def configure_paypal():\n \"\"\"\n Configure the paypal sdk\n :return: Credentials\n \"\"\"\n settings = get_settings()\n # Use Sandbox by default.\n paypal_mode = settings.get(\n 'paypal_mode',\n 'live'\n if (settings['app_environment'] == Environment.PRODUCTION)\n else 'sandbox',\n )\n paypal_key = None\n if paypal_mode == 'sandbox':\n paypal_key = 'paypal_sandbox'\n elif paypal_mode == 'live':\n paypal_key = 'paypal'\n\n if not paypal_key:\n raise ConflictError(\n {'pointer': ''}, \"Paypal Mode must be 'live' or 'sandbox'\"\n )\n\n paypal_client = settings.get(f'{paypal_key}_client', None)\n paypal_secret = settings.get(f'{paypal_key}_secret', None)\n\n if not paypal_client or not paypal_secret:\n raise ConflictError(\n {'pointer': ''},\n \"Payments through Paypal have not been configured on the platform\",\n )\n return paypalrestsdk.configure(\n {\n \"mode\": paypal_mode,\n \"client_id\": paypal_client,\n \"client_secret\": paypal_secret,\n }\n )\n\n @staticmethod\n def create_payment(order, return_url, cancel_url, payee_email=None):\n \"\"\"\n Create payment for an order\n :param order: Order to create payment for.\n :param return_url: return url for the payment.\n :param cancel_url: cancel_url for the payment.\n :param payee_email: email of the payee. Default to event paypal email if not set\n :return: request_id or the error message along with an indicator.\n \"\"\"\n payee_email = payee_email or order.event.paypal_email\n if not payee_email:\n raise ConflictError(\n {'pointer': ''},\n \"Payments through Paypal hasn't been configured for the billing\",\n )\n\n PayPalPaymentsManager.configure_paypal()\n\n payment = paypalrestsdk.Payment(\n {\n \"intent\": \"sale\",\n \"payer\": {\"payment_method\": \"paypal\"},\n \"redirect_urls\": {\"return_url\": return_url, \"cancel_url\": cancel_url},\n \"transactions\": [\n {\n \"amount\": {\n \"total\": float(round_money(order.amount)),\n \"currency\": order.event.payment_currency,\n },\n \"payee\": {\"email\": payee_email},\n }\n ],\n }\n )\n\n if payment.create():\n return True, payment.id\n return False, payment.error\n\n @staticmethod\n def verify_payment(payment_id, order):\n \"\"\"\n Verify Paypal payment one more time for paying with Paypal in mobile client\n \"\"\"\n PayPalPaymentsManager.configure_paypal()\n try:\n payment_server = paypalrestsdk.Payment.find(payment_id)\n if payment_server.state != 'approved':\n return (\n False,\n 'Payment has not been approved yet. 
Status is '\n + payment_server.state\n + '.',\n )\n\n # Get the most recent transaction\n transaction = payment_server.transactions[0]\n amount_server = transaction.amount.total\n currency_server = transaction.amount.currency\n sale_state = transaction.related_resources[0].sale.state\n\n if float(amount_server) != order.amount:\n return False, 'Payment amount does not match order'\n if currency_server != order.event.payment_currency:\n return False, 'Payment currency does not match order'\n if sale_state != 'completed':\n return False, 'Sale not completed'\n if PayPalPaymentsManager.used_payment(payment_id, order):\n return False, 'Payment already been verified'\n return True, None\n except paypalrestsdk.ResourceNotFound:\n return False, 'Payment Not Found'\n\n @staticmethod\n def used_payment(payment_id, order):\n \"\"\"\n Function to check for recycling of payment IDs\n \"\"\"\n if Order.query.filter(Order.paypal_token == payment_id).first() is None:\n order.paypal_token = payment_id\n save_to_db(order)\n return False\n return True\n\n @staticmethod\n def execute_payment(paypal_payer_id, paypal_payment_id):\n \"\"\"\n Execute payemnt and charge the user.\n :param paypal_payment_id: payment_id\n :param paypal_payer_id: payer_id\n :return: Result of the transaction.\n \"\"\"\n PayPalPaymentsManager.configure_paypal()\n payment = paypalrestsdk.Payment.find(paypal_payment_id)\n\n if payment.execute({\"payer_id\": paypal_payer_id}):\n return True, 'Successfully Executed'\n return False, payment.error\n\n\nclass AliPayPaymentsManager:\n \"\"\"\n Class to manage AliPay Payments\n \"\"\"\n\n @staticmethod\n def create_source(amount, currency, redirect_return_uri):\n stripe.api_key = get_settings()['alipay_publishable_key']\n response = stripe.Source.create(\n type='alipay',\n currency=currency,\n amount=amount,\n redirect={'return_url': redirect_return_uri},\n )\n return response\n\n @staticmethod\n def charge_source(order_identifier):\n order = safe_query(Order, 'identifier', order_identifier, 'identifier')\n stripe.api_key = get_settings()['alipay_secret_key']\n charge = stripe.Charge.create(\n amount=int(order.amount),\n currency=order.event.payment_currency,\n source=order.order_notes,\n )\n return charge\n\n\nclass OmisePaymentsManager:\n \"\"\"\n Class to manage Omise Payments\n \"\"\"\n\n @staticmethod\n def charge_payment(order_identifier, token):\n if get_settings()['app_environment'] == Environment.PRODUCTION:\n omise.api_secret = get_settings()['omise_test_secret']\n omise.api_public = get_settings()['omise_test_public']\n else:\n omise.api_secret = get_settings()['omise_test_secret']\n omise.api_public = get_settings()['omise_test_public']\n order = safe_query(Order, 'identifier', order_identifier, 'identifier')\n charge = omise.Charge.create(\n amount=int(round(order.amount)),\n currency=order.event.payment_currency,\n card=token,\n metadata={\"order_id\": str(order_identifier), \"status\": True},\n )\n return charge\n\n\nclass PaytmPaymentsManager:\n \"\"\"\n Class to manage PayTM payments\n \"\"\"\n\n @property\n def paytm_endpoint(self):\n if get_settings()['paytm_mode'] == 'test':\n url = \"https://securegw-stage.paytm.in/theia/api/v1/\"\n else:\n url = \"https://securegw.paytm.in/theia/api/v1/\"\n return url\n\n @staticmethod\n def generate_checksum(paytm_params):\n if get_settings()['paytm_mode'] == 'test':\n merchant_key = get_settings()['paytm_sandbox_secret']\n else:\n merchant_key = get_settings()['paytm_live_secret']\n return checksum.generate_checksum_by_str(\n 
json.dumps(paytm_params[\"body\"]), merchant_key\n )\n\n @staticmethod\n def hit_paytm_endpoint(url, head, body=None):\n paytm_params = {}\n paytm_params[\"body\"] = body\n paytm_params[\"head\"] = head\n post_data = json.dumps(paytm_params)\n response = requests.post(\n url, data=post_data, headers={\"Content-type\": \"application/json\"}\n ).json()\n return response\n", "path": "app/api/helpers/payment.py"}]}
| 4,093 | 281 |
gh_patches_debug_14039 | rasdani/github-patches | git_diff | conda__conda-5236 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
An unexpected error has occurred.
Current conda install:
platform : win-64
conda version : 4.3.11
conda is private : False
conda-env version : 4.3.11
conda-build version : 2.0.2
python version : 2.7.12.final.0
requests version : 2.13.0
root environment : I:\Program Files\Anaconda2 (writable)
default environment : I:\Program Files\Anaconda2
envs directories : I:\Program Files\Anaconda2\envs
C:\Users\topnet\AppData\Local\conda\conda\envs
C:\Users\topnet\.conda\envs
package cache : I:\Program Files\Anaconda2\pkgs
C:\Users\topnet\AppData\Local\conda\conda\pkgs
channel URLs : https://conda.anaconda.org/conda-forge/win-64
https://conda.anaconda.org/conda-forge/noarch
https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/win-64
https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/noarch
https://repo.continuum.io/pkgs/free/win-64
https://repo.continuum.io/pkgs/free/noarch
https://repo.continuum.io/pkgs/r/win-64
https://repo.continuum.io/pkgs/r/noarch
https://repo.continuum.io/pkgs/pro/win-64
https://repo.continuum.io/pkgs/pro/noarch
https://repo.continuum.io/pkgs/msys2/win-64
https://repo.continuum.io/pkgs/msys2/noarch
config file : C:\Users\topnet\.condarc
offline mode : False
user-agent : conda/4.3.11 requests/2.13.0 CPython/2.7.12 Windows/10 Windows/10.0.14393
`$ I:\Program Files\Anaconda2\Scripts\conda-script.py install numpy`
Traceback (most recent call last):
File "I:\Program Files\Anaconda2\lib\site-packages\conda\exceptions.py", line 616, in conda_exception_handler
return_value = func(*args, **kwargs)
File "I:\Program Files\Anaconda2\lib\site-packages\conda\cli\main.py", line 137, in _main
exit_code = args.func(args, p)
File "I:\Program Files\Anaconda2\lib\site-packages\conda\cli\main_install.py", line 80, in execute
install(args, parser, 'install')
File "I:\Program Files\Anaconda2\lib\site-packages\conda\cli\install.py", line 359, in install
execute_actions(actions, index, verbose=not context.quiet)
File "I:\Program Files\Anaconda2\lib\site-packages\conda\plan.py", line 825, in execute_actions
execute_instructions(plan, index, verbose)
File "I:\Program Files\Anaconda2\lib\site-packages\conda\instructions.py", line 258, in execute_instructions
cmd(state, arg)
File "I:\Program Files\Anaconda2\lib\site-packages\conda\instructions.py", line 111, in PROGRESSIVEFETCHEXTRACT_CMD
progressive_fetch_extract.execute()
File "I:\Program Files\Anaconda2\lib\site-packages\conda\core\package_cache.py", line 470, in execute
self._execute_action(action)
File "I:\Program Files\Anaconda2\lib\site-packages\conda\core\package_cache.py", line 486, in _execute_action
exceptions.append(CondaError(repr(e)))
File "I:\Program Files\Anaconda2\lib\site-packages\conda\__init__.py", line 43, in __repr__
return '%s: %s\n' % (self.__class__.__name__, text_type(self))
File "I:\Program Files\Anaconda2\lib\site-packages\conda\__init__.py", line 46, in __str__
return text_type(self.message % self._kwargs)
TypeError: not enough arguments for format string
thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/__init__.py`
Content:
```
1 # (c) 2012-2016 Continuum Analytics, Inc. / http://continuum.io
2 # All Rights Reserved
3 #
4 # conda is distributed under the terms of the BSD 3-clause license.
5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
6 """OS-agnostic, system-level binary package manager."""
7 from __future__ import absolute_import, division, print_function, unicode_literals
8
9 import os
10 from os.path import dirname
11 import sys
12
13 from ._vendor.auxlib.packaging import get_version
14 from .common.compat import iteritems, text_type
15
16 __all__ = (
17 "__name__", "__version__", "__author__", "__email__", "__license__", "__summary__", "__url__",
18 "CONDA_PACKAGE_ROOT", "CondaError", "CondaMultiError", "CondaExitZero", "conda_signal_handler",
19 )
20
21 __name__ = "conda"
22 __version__ = get_version(__file__)
23 __author__ = "Continuum Analytics, Inc."
24 __email__ = "[email protected]"
25 __license__ = "BSD"
26 __summary__ = __doc__
27 __url__ = "https://github.com/conda/conda"
28
29
30 if os.getenv('CONDA_ROOT') is None:
31 os.environ[str('CONDA_ROOT')] = sys.prefix
32
33 CONDA_PACKAGE_ROOT = dirname(__file__)
34
35
36 class CondaError(Exception):
37 def __init__(self, message, caused_by=None, **kwargs):
38 self.message = message
39 self._kwargs = kwargs
40 self._caused_by = caused_by
41 super(CondaError, self).__init__(message)
42
43 def __repr__(self):
44 return '%s: %s' % (self.__class__.__name__, text_type(self))
45
46 def __str__(self):
47 return text_type(self.message % self._kwargs)
48
49 def dump_map(self):
50 result = dict((k, v) for k, v in iteritems(vars(self)) if not k.startswith('_'))
51 result.update(exception_type=text_type(type(self)),
52 exception_name=self.__class__.__name__,
53 message=text_type(self),
54 error=repr(self),
55 caused_by=repr(self._caused_by),
56 **self._kwargs)
57 return result
58
59
60 class CondaMultiError(CondaError):
61
62 def __init__(self, errors):
63 self.errors = errors
64 super(CondaError, self).__init__(None)
65
66 def __repr__(self):
67 return '\n'.join(repr(e) for e in self.errors) + '\n'
68
69 def __str__(self):
70 return '\n'.join(text_type(e) for e in self.errors) + '\n'
71
72 def dump_map(self):
73 return dict(exception_type=text_type(type(self)),
74 exception_name=self.__class__.__name__,
75 errors=tuple(error.dump_map() for error in self.errors),
76 error="Multiple Errors Encountered.",
77 )
78
79
80 class CondaExitZero(CondaError):
81 pass
82
83
84 ACTIVE_SUBPROCESSES = set()
85
86
87 def conda_signal_handler(signum, frame):
88 # This function is in the base __init__.py so that it can be monkey-patched by other code
89 # if downstream conda users so choose. The biggest danger of monkey-patching is that
90 # unlink/link transactions don't get rolled back if interrupted mid-transaction.
91 for p in ACTIVE_SUBPROCESSES:
92 if p.poll() is None:
93 p.send_signal(signum)
94
95 from .exceptions import CondaSignalInterrupt
96 raise CondaSignalInterrupt(signum)
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conda/__init__.py b/conda/__init__.py
--- a/conda/__init__.py
+++ b/conda/__init__.py
@@ -44,7 +44,19 @@
return '%s: %s' % (self.__class__.__name__, text_type(self))
def __str__(self):
- return text_type(self.message % self._kwargs)
+ try:
+ return text_type(self.message % self._kwargs)
+ except TypeError:
+ # TypeError: not enough arguments for format string
+ debug_message = "\n".join((
+ "class: " + self.__class__.__name__,
+ "message:",
+ self.message,
+ "kwargs:",
+ text_type(self._kwargs),
+ ))
+ sys.stderr.write(debug_message)
+ raise
def dump_map(self):
result = dict((k, v) for k, v in iteritems(vars(self)) if not k.startswith('_'))
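
The patch above only guards the `%`-interpolation in `CondaError.__str__`. The sketch below is a minimal, Python-3-only stand-in (the class and messages are invented for illustration, not conda's real hierarchy) showing why a message built from `repr(e)` can raise `TypeError: not enough arguments for format string` when `_kwargs` is empty, and how a try/except around the formatting keeps the error printable:

```python
class DemoError(Exception):
    """Minimal stand-in for conda's CondaError (illustration only)."""

    def __init__(self, message, **kwargs):
        self.message = message
        self._kwargs = kwargs
        super().__init__(message)

    def __str__(self):
        try:
            # Fine when the message has no stray '%' or when every
            # %(key)s placeholder is satisfied by self._kwargs.
            return self.message % self._kwargs
        except TypeError:
            # Two bare '%s'-style sequences against an empty kwargs dict is
            # exactly "not enough arguments for format string"; fall back to
            # the raw text instead of crashing inside error reporting.
            return "{} [unformatted, kwargs={!r}]".format(self.message, self._kwargs)


# package_cache.py does CondaError(repr(e)); a repr containing '%' sequences
# then breaks the %-formatting in __str__ unless it is guarded.
err = DemoError("CondaHTTPError(\"fetch of 'pkg-1.0%s.tar.bz2' failed: %s\")")
print(str(err))   # prints the raw message; without the guard this raises TypeError
```

Note the real patch writes a debug dump to stderr and re-raises rather than swallowing the error; the fallback return here is only to keep the sketch self-contained.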
|
{"golden_diff": "diff --git a/conda/__init__.py b/conda/__init__.py\n--- a/conda/__init__.py\n+++ b/conda/__init__.py\n@@ -44,7 +44,19 @@\n return '%s: %s' % (self.__class__.__name__, text_type(self))\n \n def __str__(self):\n- return text_type(self.message % self._kwargs)\n+ try:\n+ return text_type(self.message % self._kwargs)\n+ except TypeError:\n+ # TypeError: not enough arguments for format string\n+ debug_message = \"\\n\".join((\n+ \"class: \" + self.__class__.__name__,\n+ \"message:\",\n+ self.message,\n+ \"kwargs:\",\n+ text_type(self._kwargs),\n+ ))\n+ sys.stderr.write(debug_message)\n+ raise\n \n def dump_map(self):\n result = dict((k, v) for k, v in iteritems(vars(self)) if not k.startswith('_'))\n", "issue": "An unexpected error has occurred.\nCurrent conda install:\r\n\r\n platform : win-64\r\n conda version : 4.3.11\r\n conda is private : False\r\n conda-env version : 4.3.11\r\n conda-build version : 2.0.2\r\n python version : 2.7.12.final.0\r\n requests version : 2.13.0\r\n root environment : I:\\Program Files\\Anaconda2 (writable)\r\n default environment : I:\\Program Files\\Anaconda2\r\n envs directories : I:\\Program Files\\Anaconda2\\envs\r\n C:\\Users\\topnet\\AppData\\Local\\conda\\conda\\envs\r\n C:\\Users\\topnet\\.conda\\envs\r\n package cache : I:\\Program Files\\Anaconda2\\pkgs\r\n C:\\Users\\topnet\\AppData\\Local\\conda\\conda\\pkgs\r\n channel URLs : https://conda.anaconda.org/conda-forge/win-64\r\n https://conda.anaconda.org/conda-forge/noarch\r\n https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/win-64\r\n https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/noarch\r\n https://repo.continuum.io/pkgs/free/win-64\r\n https://repo.continuum.io/pkgs/free/noarch\r\n https://repo.continuum.io/pkgs/r/win-64\r\n https://repo.continuum.io/pkgs/r/noarch\r\n https://repo.continuum.io/pkgs/pro/win-64\r\n https://repo.continuum.io/pkgs/pro/noarch\r\n https://repo.continuum.io/pkgs/msys2/win-64\r\n https://repo.continuum.io/pkgs/msys2/noarch\r\n config file : C:\\Users\\topnet\\.condarc\r\n offline mode : False\r\n user-agent : conda/4.3.11 requests/2.13.0 CPython/2.7.12 Windows/10 Windows/10.0.14393\r\n\r\n\r\n\r\n`$ I:\\Program Files\\Anaconda2\\Scripts\\conda-script.py install numpy`\r\n\r\n\r\n\r\n\r\n Traceback (most recent call last):\r\n File \"I:\\Program Files\\Anaconda2\\lib\\site-packages\\conda\\exceptions.py\", line 616, in conda_exception_handler\r\n return_value = func(*args, **kwargs)\r\n File \"I:\\Program Files\\Anaconda2\\lib\\site-packages\\conda\\cli\\main.py\", line 137, in _main\r\n exit_code = args.func(args, p)\r\n File \"I:\\Program Files\\Anaconda2\\lib\\site-packages\\conda\\cli\\main_install.py\", line 80, in execute\r\n install(args, parser, 'install')\r\n File \"I:\\Program Files\\Anaconda2\\lib\\site-packages\\conda\\cli\\install.py\", line 359, in install\r\n execute_actions(actions, index, verbose=not context.quiet)\r\n File \"I:\\Program Files\\Anaconda2\\lib\\site-packages\\conda\\plan.py\", line 825, in execute_actions\r\n execute_instructions(plan, index, verbose)\r\n File \"I:\\Program Files\\Anaconda2\\lib\\site-packages\\conda\\instructions.py\", line 258, in execute_instructions\r\n cmd(state, arg)\r\n File \"I:\\Program Files\\Anaconda2\\lib\\site-packages\\conda\\instructions.py\", line 111, in PROGRESSIVEFETCHEXTRACT_CMD\r\n progressive_fetch_extract.execute()\r\n File \"I:\\Program Files\\Anaconda2\\lib\\site-packages\\conda\\core\\package_cache.py\", line 470, in execute\r\n self._execute_action(action)\r\n File 
\"I:\\Program Files\\Anaconda2\\lib\\site-packages\\conda\\core\\package_cache.py\", line 486, in _execute_action\r\n exceptions.append(CondaError(repr(e)))\r\n File \"I:\\Program Files\\Anaconda2\\lib\\site-packages\\conda\\__init__.py\", line 43, in __repr__\r\n return '%s: %s\\n' % (self.__class__.__name__, text_type(self))\r\n File \"I:\\Program Files\\Anaconda2\\lib\\site-packages\\conda\\__init__.py\", line 46, in __str__\r\n return text_type(self.message % self._kwargs)\r\n TypeError: not enough arguments for format string\r\n\r\nthanks\uff01\n", "before_files": [{"content": "# (c) 2012-2016 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\"\"\"OS-agnostic, system-level binary package manager.\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nfrom os.path import dirname\nimport sys\n\nfrom ._vendor.auxlib.packaging import get_version\nfrom .common.compat import iteritems, text_type\n\n__all__ = (\n \"__name__\", \"__version__\", \"__author__\", \"__email__\", \"__license__\", \"__summary__\", \"__url__\",\n \"CONDA_PACKAGE_ROOT\", \"CondaError\", \"CondaMultiError\", \"CondaExitZero\", \"conda_signal_handler\",\n)\n\n__name__ = \"conda\"\n__version__ = get_version(__file__)\n__author__ = \"Continuum Analytics, Inc.\"\n__email__ = \"[email protected]\"\n__license__ = \"BSD\"\n__summary__ = __doc__\n__url__ = \"https://github.com/conda/conda\"\n\n\nif os.getenv('CONDA_ROOT') is None:\n os.environ[str('CONDA_ROOT')] = sys.prefix\n\nCONDA_PACKAGE_ROOT = dirname(__file__)\n\n\nclass CondaError(Exception):\n def __init__(self, message, caused_by=None, **kwargs):\n self.message = message\n self._kwargs = kwargs\n self._caused_by = caused_by\n super(CondaError, self).__init__(message)\n\n def __repr__(self):\n return '%s: %s' % (self.__class__.__name__, text_type(self))\n\n def __str__(self):\n return text_type(self.message % self._kwargs)\n\n def dump_map(self):\n result = dict((k, v) for k, v in iteritems(vars(self)) if not k.startswith('_'))\n result.update(exception_type=text_type(type(self)),\n exception_name=self.__class__.__name__,\n message=text_type(self),\n error=repr(self),\n caused_by=repr(self._caused_by),\n **self._kwargs)\n return result\n\n\nclass CondaMultiError(CondaError):\n\n def __init__(self, errors):\n self.errors = errors\n super(CondaError, self).__init__(None)\n\n def __repr__(self):\n return '\\n'.join(repr(e) for e in self.errors) + '\\n'\n\n def __str__(self):\n return '\\n'.join(text_type(e) for e in self.errors) + '\\n'\n\n def dump_map(self):\n return dict(exception_type=text_type(type(self)),\n exception_name=self.__class__.__name__,\n errors=tuple(error.dump_map() for error in self.errors),\n error=\"Multiple Errors Encountered.\",\n )\n\n\nclass CondaExitZero(CondaError):\n pass\n\n\nACTIVE_SUBPROCESSES = set()\n\n\ndef conda_signal_handler(signum, frame):\n # This function is in the base __init__.py so that it can be monkey-patched by other code\n # if downstream conda users so choose. 
The biggest danger of monkey-patching is that\n # unlink/link transactions don't get rolled back if interrupted mid-transaction.\n for p in ACTIVE_SUBPROCESSES:\n if p.poll() is None:\n p.send_signal(signum)\n\n from .exceptions import CondaSignalInterrupt\n raise CondaSignalInterrupt(signum)\n", "path": "conda/__init__.py"}], "after_files": [{"content": "# (c) 2012-2016 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\"\"\"OS-agnostic, system-level binary package manager.\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nfrom os.path import dirname\nimport sys\n\nfrom ._vendor.auxlib.packaging import get_version\nfrom .common.compat import iteritems, text_type\n\n__all__ = (\n \"__name__\", \"__version__\", \"__author__\", \"__email__\", \"__license__\", \"__summary__\", \"__url__\",\n \"CONDA_PACKAGE_ROOT\", \"CondaError\", \"CondaMultiError\", \"CondaExitZero\", \"conda_signal_handler\",\n)\n\n__name__ = \"conda\"\n__version__ = get_version(__file__)\n__author__ = \"Continuum Analytics, Inc.\"\n__email__ = \"[email protected]\"\n__license__ = \"BSD\"\n__summary__ = __doc__\n__url__ = \"https://github.com/conda/conda\"\n\n\nif os.getenv('CONDA_ROOT') is None:\n os.environ[str('CONDA_ROOT')] = sys.prefix\n\nCONDA_PACKAGE_ROOT = dirname(__file__)\n\n\nclass CondaError(Exception):\n def __init__(self, message, caused_by=None, **kwargs):\n self.message = message\n self._kwargs = kwargs\n self._caused_by = caused_by\n super(CondaError, self).__init__(message)\n\n def __repr__(self):\n return '%s: %s' % (self.__class__.__name__, text_type(self))\n\n def __str__(self):\n try:\n return text_type(self.message % self._kwargs)\n except TypeError:\n # TypeError: not enough arguments for format string\n debug_message = \"\\n\".join((\n \"class: \" + self.__class__.__name__,\n \"message:\",\n self.message,\n \"kwargs:\",\n text_type(self._kwargs),\n ))\n sys.stderr.write(debug_message)\n raise\n\n def dump_map(self):\n result = dict((k, v) for k, v in iteritems(vars(self)) if not k.startswith('_'))\n result.update(exception_type=text_type(type(self)),\n exception_name=self.__class__.__name__,\n message=text_type(self),\n error=repr(self),\n caused_by=repr(self._caused_by),\n **self._kwargs)\n return result\n\n\nclass CondaMultiError(CondaError):\n\n def __init__(self, errors):\n self.errors = errors\n super(CondaError, self).__init__(None)\n\n def __repr__(self):\n return '\\n'.join(repr(e) for e in self.errors) + '\\n'\n\n def __str__(self):\n return '\\n'.join(text_type(e) for e in self.errors) + '\\n'\n\n def dump_map(self):\n return dict(exception_type=text_type(type(self)),\n exception_name=self.__class__.__name__,\n errors=tuple(error.dump_map() for error in self.errors),\n error=\"Multiple Errors Encountered.\",\n )\n\n\nclass CondaExitZero(CondaError):\n pass\n\n\nACTIVE_SUBPROCESSES = set()\n\n\ndef conda_signal_handler(signum, frame):\n # This function is in the base __init__.py so that it can be monkey-patched by other code\n # if downstream conda users so choose. 
The biggest danger of monkey-patching is that\n # unlink/link transactions don't get rolled back if interrupted mid-transaction.\n for p in ACTIVE_SUBPROCESSES:\n if p.poll() is None:\n p.send_signal(signum)\n\n from .exceptions import CondaSignalInterrupt\n raise CondaSignalInterrupt(signum)\n", "path": "conda/__init__.py"}]}
| 2,240 | 214 |
gh_patches_debug_8159
|
rasdani/github-patches
|
git_diff
|
qtile__qtile-1738
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot input in gmrun window
This commit 0f886606e1f35226b325f5405d43946976eae831 introduced a bug for `gmrun` windows at least. When spawning, one cannot input text into it. This is fixed by unfloating the window, and continues to work even when refloating it.
Window properties:
```
_NET_WM_USER_TIME(CARDINAL) = 5511537
_NET_WM_DESKTOP(CARDINAL) = 1
WM_STATE(WM_STATE):
window state: Normal
icon window: 0x0
WM_HINTS(WM_HINTS):
Client accepts input or input focus: True
Initial state is Normal State.
window id # of group leader: 0x1c00001
XdndAware(ATOM) = BITMAP
_MOTIF_DRAG_RECEIVER_INFO(_MOTIF_DRAG_RECEIVER_INFO) = 0x6c, 0x0, 0x5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10, 0x0, 0x0, 0x0
_MOTIF_WM_HINTS(_MOTIF_WM_HINTS) = 0x2, 0x0, 0x2, 0x0, 0x0
_NET_WM_SYNC_REQUEST_COUNTER(CARDINAL) = 29360133
_NET_WM_WINDOW_TYPE(ATOM) = _NET_WM_WINDOW_TYPE_DIALOG
_NET_WM_USER_TIME_WINDOW(WINDOW): window id # 0x1c00004
WM_CLIENT_LEADER(WINDOW): window id # 0x1c00001
_NET_WM_PID(CARDINAL) = 64030
WM_LOCALE_NAME(STRING) = "en_US.UTF-8"
WM_CLIENT_MACHINE(STRING) = "corsair"
WM_NORMAL_HINTS(WM_SIZE_HINTS):
program specified location: 0, 0
program specified minimum size: 412 by 75
program specified maximum size: 412 by 75
window gravity: NorthWest
WM_PROTOCOLS(ATOM): protocols WM_DELETE_WINDOW, WM_TAKE_FOCUS, _NET_WM_PING, _NET_WM_SYNC_REQUEST
WM_CLASS(STRING) = "gmrun", "Gmrun"
WM_ICON_NAME(STRING) = "Execute program feat. completion"
_NET_WM_ICON_NAME(UTF8_STRING) = "Execute program feat. completion"
WM_NAME(STRING) = "Execute program feat. completion"
_NET_WM_NAME(UTF8_STRING) = "Execute program feat. completion"
```
Just tell me if you need logs and I'll add some!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/layout/floating.py`
Content:
```
1 # Copyright (c) 2010 matt
2 # Copyright (c) 2010-2011 Paul Colomiets
3 # Copyright (c) 2011 Mounier Florian
4 # Copyright (c) 2012 Craig Barnes
5 # Copyright (c) 2012, 2014-2015 Tycho Andersen
6 # Copyright (c) 2013 Tao Sauvage
7 # Copyright (c) 2013 Julien Iguchi-Cartigny
8 # Copyright (c) 2014 ramnes
9 # Copyright (c) 2014 Sean Vig
10 # Copyright (c) 2014 dequis
11 # Copyright (c) 2018 Nazar Mokrynskyi
12 #
13 # Permission is hereby granted, free of charge, to any person obtaining a copy
14 # of this software and associated documentation files (the "Software"), to deal
15 # in the Software without restriction, including without limitation the rights
16 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
17 # copies of the Software, and to permit persons to whom the Software is
18 # furnished to do so, subject to the following conditions:
19 #
20 # The above copyright notice and this permission notice shall be included in
21 # all copies or substantial portions of the Software.
22 #
23 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
28 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 # SOFTWARE.
30
31 from libqtile.layout.base import Layout
32
33 DEFAULT_FLOAT_WM_TYPES = set([
34 'utility',
35 'notification',
36 'toolbar',
37 'splash',
38 'dialog',
39 ])
40
41 DEFAULT_FLOAT_RULES = [
42 {"role": "About"},
43 {"wmclass": "file_progress"},
44 ]
45
46
47 class Floating(Layout):
48 """
49 Floating layout, which does nothing with windows but handles focus order
50 """
51 defaults = [
52 ("border_focus", "#0000ff", "Border colour for the focused window."),
53 ("border_normal", "#000000", "Border colour for un-focused windows."),
54 ("border_width", 1, "Border width."),
55 ("max_border_width", 0, "Border width for maximize."),
56 ("fullscreen_border_width", 0, "Border width for fullscreen."),
57 ("name", "floating", "Name of this layout."),
58 (
59 "auto_float_types",
60 DEFAULT_FLOAT_WM_TYPES,
61 "default wm types to automatically float"
62 ),
63 ]
64
65 def __init__(self, float_rules=None, no_reposition_match=None, **config):
66 """
67 If you have certain apps that you always want to float you can provide
68 ``float_rules`` to do so. ``float_rules`` is a list of
69 dictionaries containing some or all of the keys::
70
71 {'wname': WM_NAME, 'wmclass': WM_CLASS, 'role': WM_WINDOW_ROLE}
72
73 The keys must be specified as above. You only need one, but
74 you need to provide the value for it. When a new window is
75 opened it's ``match`` method is called with each of these
76 rules. If one matches, the window will float. The following
77 will float gimp and skype::
78
79 float_rules=[dict(wmclass="skype"), dict(wmclass="gimp")]
80
81 Specify these in the ``floating_layout`` in your config.
82
83 Floating layout will try to center most of floating windows by default,
84 but if you don't want this to happen for certain windows that are
85 centered by mistake, you can use ``no_reposition_match`` option to
86 specify them and layout will rely on windows to position themselves in
87 correct location on the screen.
88 """
89 Layout.__init__(self, **config)
90 self.clients = []
91 self.focused = None
92 self.group = None
93 self.float_rules = float_rules or DEFAULT_FLOAT_RULES
94 self.no_reposition_match = no_reposition_match
95 self.add_defaults(Floating.defaults)
96
97 def match(self, win):
98 """Used to default float some windows"""
99 if win.window.get_wm_type() in self.auto_float_types:
100 return True
101 for rule_dict in self.float_rules:
102 if win.match(**rule_dict):
103 return True
104 return False
105
106 def find_clients(self, group):
107 """Find all clients belonging to a given group"""
108 return [c for c in self.clients if c.group is group]
109
110 def to_screen(self, group, new_screen):
111 """Adjust offsets of clients within current screen"""
112 for win in self.find_clients(group):
113 if win.maximized:
114 win.maximized = True
115 elif win.fullscreen:
116 win.fullscreen = True
117 else:
118 # catch if the client hasn't been configured
119 try:
120 # By default, place window at same offset from top corner
121 new_x = new_screen.x + win.float_x
122 new_y = new_screen.y + win.float_y
123 except AttributeError:
124 # this will be handled in .configure()
125 pass
126 else:
127 # make sure window isn't off screen left/right...
128 new_x = min(new_x, new_screen.x + new_screen.width - win.width)
129 new_x = max(new_x, new_screen.x)
130 # and up/down
131 new_y = min(new_y, new_screen.y + new_screen.height - win.height)
132 new_y = max(new_y, new_screen.y)
133
134 win.x = new_x
135 win.y = new_y
136 win.group = new_screen.group
137
138 def focus_first(self, group=None):
139 if group is None:
140 clients = self.clients
141 else:
142 clients = self.find_clients(group)
143
144 if clients:
145 return clients[0]
146
147 def focus_next(self, win):
148 if win not in self.clients or win.group is None:
149 return
150
151 clients = self.find_clients(win.group)
152 idx = clients.index(win)
153 if len(clients) > idx + 1:
154 return clients[idx + 1]
155
156 def focus_last(self, group=None):
157 if group is None:
158 clients = self.clients
159 else:
160 clients = self.find_clients(group)
161
162 if clients:
163 return clients[-1]
164
165 def focus_previous(self, win):
166 if win not in self.clients or win.group is None:
167 return
168
169 clients = self.find_clients(win.group)
170 idx = clients.index(win)
171 if idx > 0:
172 return clients[idx - 1]
173
174 def focus(self, client):
175 self.focused = client
176
177 def blur(self):
178 self.focused = None
179
180 def compute_client_position(self, client, screen):
181 """ recompute client.x and client.y, returning whether or not to place
182 this client above other windows or not """
183 above = False
184 transient_for = client.window.get_wm_transient_for()
185 win = client.group.qtile.windows_map.get(transient_for)
186 if win is not None:
187 # if transient for a window, place in the center of the window
188 center_x = win.x + win.width / 2
189 center_y = win.y + win.height / 2
190 else:
191 center_x = screen.x + screen.width / 2
192 center_y = screen.y + screen.height / 2
193 above = True
194
195 x = center_x - client.width / 2
196 y = center_y - client.height / 2
197
198 # don't go off the right...
199 x = min(x, screen.x + screen.width)
200 # or left...
201 x = max(x, screen.x)
202 # or bottom...
203 y = min(y, screen.y + screen.height)
204 # or top
205 y = max(y, screen.y)
206
207 client.x = int(round(x))
208 client.y = int(round(y))
209 return above
210
211 def configure(self, client, screen):
212 # 'sun-awt-X11-XWindowPeer' is a dropdown used in Java application,
213 # don't reposition it anywhere, let Java app to control it
214 cls = client.window.get_wm_class() or ''
215 is_java_dropdown = 'sun-awt-X11-XWindowPeer' in cls
216 if is_java_dropdown:
217 return
218
219 # similar to above but the X11 version, the client may have already
220 # placed itself. let's respect that
221 if client.has_user_set_position():
222 return
223
224 # ok, it's not java and the window itself didn't position it, but users
225 # may still have asked us not to mess with it
226 if self.no_reposition_match is not None and self.no_reposition_match.compare(client):
227 return
228
229 if client.has_focus:
230 bc = client.group.qtile.color_pixel(self.border_focus)
231 else:
232 bc = client.group.qtile.color_pixel(self.border_normal)
233 if client.maximized:
234 bw = self.max_border_width
235 elif client.fullscreen:
236 bw = self.fullscreen_border_width
237 else:
238 bw = self.border_width
239 above = False
240
241 # We definitely have a screen here, so let's be sure we'll float on screen
242 try:
243 client.float_x
244 client.float_y
245 except AttributeError:
246 # this window hasn't been placed before, let's put it in a sensible spot
247 above = self.compute_client_position(client, screen)
248
249 client.place(
250 client.x,
251 client.y,
252 client.width,
253 client.height,
254 bw,
255 bc,
256 above,
257 )
258 client.unhide()
259
260 def add(self, client):
261 self.clients.append(client)
262 self.focused = client
263
264 def remove(self, client):
265 if client not in self.clients:
266 return
267
268 next_focus = self.focus_next(client)
269 if client is self.focused:
270 self.blur()
271 self.clients.remove(client)
272 return next_focus
273
274 def info(self):
275 d = Layout.info(self)
276 d["clients"] = [c.name for c in self.clients]
277 return d
278
279 def cmd_next(self):
280 # This can't ever be called, but implement the abstract method
281 pass
282
283 def cmd_previous(self):
284 # This can't ever be called, but implement the abstract method
285 pass
286
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/libqtile/layout/floating.py b/libqtile/layout/floating.py
--- a/libqtile/layout/floating.py
+++ b/libqtile/layout/floating.py
@@ -209,6 +209,10 @@
return above
def configure(self, client, screen):
+ # After this, the client will be mapped. Either this will do it, or the
+ # client has already done it.
+ client.hidden = False
+
# 'sun-awt-X11-XWindowPeer' is a dropdown used in Java application,
# don't reposition it anywhere, let Java app to control it
cls = client.window.get_wm_class() or ''
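
As the added comment in the diff suggests, the root cause appears to be bookkeeping: `Floating.configure` has several early-return branches (Java dropdowns, clients that place themselves — gmrun does via its size hints — and `no_reposition_match`) that skip `client.unhide()`, so the `hidden` flag stays stale even though the window is mapped. Below is a toy model of that control flow; the classes are invented for illustration and are not qtile's real `Window`/`Floating` API:

```python
class FakeClient:
    """Toy stand-in for a qtile window; only models the bookkeeping flag."""

    def __init__(self, positions_itself):
        self.hidden = True              # what the rest of the WM consults
        self.positions_itself = positions_itself

    def unhide(self):
        self.hidden = False


def configure_before_fix(client):
    # gmrun advertises its own position, so has_user_set_position() is true,
    # the function returns early, and unhide() is never reached.
    if client.positions_itself:
        return
    client.unhide()


def configure_after_fix(client):
    # The patch: the client is mapped either way, so record that up front,
    # before any of the early-return branches can skip the bookkeeping.
    client.hidden = False
    if client.positions_itself:
        return
    client.unhide()


gmrun = FakeClient(positions_itself=True)
configure_before_fix(gmrun)
print(gmrun.hidden)   # True  -> window still treated as hidden, input misbehaves

gmrun = FakeClient(positions_itself=True)
configure_after_fix(gmrun)
print(gmrun.hidden)   # False -> state matches the mapped window
```

Setting the flag before any branch keeps the state consistent no matter which path runs, which is a much smaller change than restructuring every early return.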
|
{"golden_diff": "diff --git a/libqtile/layout/floating.py b/libqtile/layout/floating.py\n--- a/libqtile/layout/floating.py\n+++ b/libqtile/layout/floating.py\n@@ -209,6 +209,10 @@\n return above\n \n def configure(self, client, screen):\n+ # After this, the client will be mapped. Either this will do it, or the\n+ # client has already done it.\n+ client.hidden = False\n+\n # 'sun-awt-X11-XWindowPeer' is a dropdown used in Java application,\n # don't reposition it anywhere, let Java app to control it\n cls = client.window.get_wm_class() or ''\n", "issue": "Cannot input in gmrun window\nThis commit 0f886606e1f35226b325f5405d43946976eae831 introduced a bug for `gmrun` windows at least. When spawning, one cannot input text into it. This is fixed by unfloating the window, and continues to work even when refloating it.\r\n\r\nWindow properties:\r\n\r\n```\r\n_NET_WM_USER_TIME(CARDINAL) = 5511537\r\n_NET_WM_DESKTOP(CARDINAL) = 1\r\nWM_STATE(WM_STATE):\r\n\t\twindow state: Normal\r\n\t\ticon window: 0x0\r\nWM_HINTS(WM_HINTS):\r\n\t\tClient accepts input or input focus: True\r\n\t\tInitial state is Normal State.\r\n\t\twindow id # of group leader: 0x1c00001\r\nXdndAware(ATOM) = BITMAP\r\n_MOTIF_DRAG_RECEIVER_INFO(_MOTIF_DRAG_RECEIVER_INFO) = 0x6c, 0x0, 0x5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10, 0x0, 0x0, 0x0\r\n_MOTIF_WM_HINTS(_MOTIF_WM_HINTS) = 0x2, 0x0, 0x2, 0x0, 0x0\r\n_NET_WM_SYNC_REQUEST_COUNTER(CARDINAL) = 29360133\r\n_NET_WM_WINDOW_TYPE(ATOM) = _NET_WM_WINDOW_TYPE_DIALOG\r\n_NET_WM_USER_TIME_WINDOW(WINDOW): window id # 0x1c00004\r\nWM_CLIENT_LEADER(WINDOW): window id # 0x1c00001\r\n_NET_WM_PID(CARDINAL) = 64030\r\nWM_LOCALE_NAME(STRING) = \"en_US.UTF-8\"\r\nWM_CLIENT_MACHINE(STRING) = \"corsair\"\r\nWM_NORMAL_HINTS(WM_SIZE_HINTS):\r\n\t\tprogram specified location: 0, 0\r\n\t\tprogram specified minimum size: 412 by 75\r\n\t\tprogram specified maximum size: 412 by 75\r\n\t\twindow gravity: NorthWest\r\nWM_PROTOCOLS(ATOM): protocols WM_DELETE_WINDOW, WM_TAKE_FOCUS, _NET_WM_PING, _NET_WM_SYNC_REQUEST\r\nWM_CLASS(STRING) = \"gmrun\", \"Gmrun\"\r\nWM_ICON_NAME(STRING) = \"Execute program feat. completion\"\r\n_NET_WM_ICON_NAME(UTF8_STRING) = \"Execute program feat. completion\"\r\nWM_NAME(STRING) = \"Execute program feat. completion\"\r\n_NET_WM_NAME(UTF8_STRING) = \"Execute program feat. 
completion\"\r\n```\r\n\r\nJust tell me if you need logs and I'll add some!\n", "before_files": [{"content": "# Copyright (c) 2010 matt\n# Copyright (c) 2010-2011 Paul Colomiets\n# Copyright (c) 2011 Mounier Florian\n# Copyright (c) 2012 Craig Barnes\n# Copyright (c) 2012, 2014-2015 Tycho Andersen\n# Copyright (c) 2013 Tao Sauvage\n# Copyright (c) 2013 Julien Iguchi-Cartigny\n# Copyright (c) 2014 ramnes\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 dequis\n# Copyright (c) 2018 Nazar Mokrynskyi\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom libqtile.layout.base import Layout\n\nDEFAULT_FLOAT_WM_TYPES = set([\n 'utility',\n 'notification',\n 'toolbar',\n 'splash',\n 'dialog',\n])\n\nDEFAULT_FLOAT_RULES = [\n {\"role\": \"About\"},\n {\"wmclass\": \"file_progress\"},\n]\n\n\nclass Floating(Layout):\n \"\"\"\n Floating layout, which does nothing with windows but handles focus order\n \"\"\"\n defaults = [\n (\"border_focus\", \"#0000ff\", \"Border colour for the focused window.\"),\n (\"border_normal\", \"#000000\", \"Border colour for un-focused windows.\"),\n (\"border_width\", 1, \"Border width.\"),\n (\"max_border_width\", 0, \"Border width for maximize.\"),\n (\"fullscreen_border_width\", 0, \"Border width for fullscreen.\"),\n (\"name\", \"floating\", \"Name of this layout.\"),\n (\n \"auto_float_types\",\n DEFAULT_FLOAT_WM_TYPES,\n \"default wm types to automatically float\"\n ),\n ]\n\n def __init__(self, float_rules=None, no_reposition_match=None, **config):\n \"\"\"\n If you have certain apps that you always want to float you can provide\n ``float_rules`` to do so. ``float_rules`` is a list of\n dictionaries containing some or all of the keys::\n\n {'wname': WM_NAME, 'wmclass': WM_CLASS, 'role': WM_WINDOW_ROLE}\n\n The keys must be specified as above. You only need one, but\n you need to provide the value for it. When a new window is\n opened it's ``match`` method is called with each of these\n rules. If one matches, the window will float. 
The following\n will float gimp and skype::\n\n float_rules=[dict(wmclass=\"skype\"), dict(wmclass=\"gimp\")]\n\n Specify these in the ``floating_layout`` in your config.\n\n Floating layout will try to center most of floating windows by default,\n but if you don't want this to happen for certain windows that are\n centered by mistake, you can use ``no_reposition_match`` option to\n specify them and layout will rely on windows to position themselves in\n correct location on the screen.\n \"\"\"\n Layout.__init__(self, **config)\n self.clients = []\n self.focused = None\n self.group = None\n self.float_rules = float_rules or DEFAULT_FLOAT_RULES\n self.no_reposition_match = no_reposition_match\n self.add_defaults(Floating.defaults)\n\n def match(self, win):\n \"\"\"Used to default float some windows\"\"\"\n if win.window.get_wm_type() in self.auto_float_types:\n return True\n for rule_dict in self.float_rules:\n if win.match(**rule_dict):\n return True\n return False\n\n def find_clients(self, group):\n \"\"\"Find all clients belonging to a given group\"\"\"\n return [c for c in self.clients if c.group is group]\n\n def to_screen(self, group, new_screen):\n \"\"\"Adjust offsets of clients within current screen\"\"\"\n for win in self.find_clients(group):\n if win.maximized:\n win.maximized = True\n elif win.fullscreen:\n win.fullscreen = True\n else:\n # catch if the client hasn't been configured\n try:\n # By default, place window at same offset from top corner\n new_x = new_screen.x + win.float_x\n new_y = new_screen.y + win.float_y\n except AttributeError:\n # this will be handled in .configure()\n pass\n else:\n # make sure window isn't off screen left/right...\n new_x = min(new_x, new_screen.x + new_screen.width - win.width)\n new_x = max(new_x, new_screen.x)\n # and up/down\n new_y = min(new_y, new_screen.y + new_screen.height - win.height)\n new_y = max(new_y, new_screen.y)\n\n win.x = new_x\n win.y = new_y\n win.group = new_screen.group\n\n def focus_first(self, group=None):\n if group is None:\n clients = self.clients\n else:\n clients = self.find_clients(group)\n\n if clients:\n return clients[0]\n\n def focus_next(self, win):\n if win not in self.clients or win.group is None:\n return\n\n clients = self.find_clients(win.group)\n idx = clients.index(win)\n if len(clients) > idx + 1:\n return clients[idx + 1]\n\n def focus_last(self, group=None):\n if group is None:\n clients = self.clients\n else:\n clients = self.find_clients(group)\n\n if clients:\n return clients[-1]\n\n def focus_previous(self, win):\n if win not in self.clients or win.group is None:\n return\n\n clients = self.find_clients(win.group)\n idx = clients.index(win)\n if idx > 0:\n return clients[idx - 1]\n\n def focus(self, client):\n self.focused = client\n\n def blur(self):\n self.focused = None\n\n def compute_client_position(self, client, screen):\n \"\"\" recompute client.x and client.y, returning whether or not to place\n this client above other windows or not \"\"\"\n above = False\n transient_for = client.window.get_wm_transient_for()\n win = client.group.qtile.windows_map.get(transient_for)\n if win is not None:\n # if transient for a window, place in the center of the window\n center_x = win.x + win.width / 2\n center_y = win.y + win.height / 2\n else:\n center_x = screen.x + screen.width / 2\n center_y = screen.y + screen.height / 2\n above = True\n\n x = center_x - client.width / 2\n y = center_y - client.height / 2\n\n # don't go off the right...\n x = min(x, screen.x + screen.width)\n # or left...\n x 
= max(x, screen.x)\n # or bottom...\n y = min(y, screen.y + screen.height)\n # or top\n y = max(y, screen.y)\n\n client.x = int(round(x))\n client.y = int(round(y))\n return above\n\n def configure(self, client, screen):\n # 'sun-awt-X11-XWindowPeer' is a dropdown used in Java application,\n # don't reposition it anywhere, let Java app to control it\n cls = client.window.get_wm_class() or ''\n is_java_dropdown = 'sun-awt-X11-XWindowPeer' in cls\n if is_java_dropdown:\n return\n\n # similar to above but the X11 version, the client may have already\n # placed itself. let's respect that\n if client.has_user_set_position():\n return\n\n # ok, it's not java and the window itself didn't position it, but users\n # may still have asked us not to mess with it\n if self.no_reposition_match is not None and self.no_reposition_match.compare(client):\n return\n\n if client.has_focus:\n bc = client.group.qtile.color_pixel(self.border_focus)\n else:\n bc = client.group.qtile.color_pixel(self.border_normal)\n if client.maximized:\n bw = self.max_border_width\n elif client.fullscreen:\n bw = self.fullscreen_border_width\n else:\n bw = self.border_width\n above = False\n\n # We definitely have a screen here, so let's be sure we'll float on screen\n try:\n client.float_x\n client.float_y\n except AttributeError:\n # this window hasn't been placed before, let's put it in a sensible spot\n above = self.compute_client_position(client, screen)\n\n client.place(\n client.x,\n client.y,\n client.width,\n client.height,\n bw,\n bc,\n above,\n )\n client.unhide()\n\n def add(self, client):\n self.clients.append(client)\n self.focused = client\n\n def remove(self, client):\n if client not in self.clients:\n return\n\n next_focus = self.focus_next(client)\n if client is self.focused:\n self.blur()\n self.clients.remove(client)\n return next_focus\n\n def info(self):\n d = Layout.info(self)\n d[\"clients\"] = [c.name for c in self.clients]\n return d\n\n def cmd_next(self):\n # This can't ever be called, but implement the abstract method\n pass\n\n def cmd_previous(self):\n # This can't ever be called, but implement the abstract method\n pass\n", "path": "libqtile/layout/floating.py"}], "after_files": [{"content": "# Copyright (c) 2010 matt\n# Copyright (c) 2010-2011 Paul Colomiets\n# Copyright (c) 2011 Mounier Florian\n# Copyright (c) 2012 Craig Barnes\n# Copyright (c) 2012, 2014-2015 Tycho Andersen\n# Copyright (c) 2013 Tao Sauvage\n# Copyright (c) 2013 Julien Iguchi-Cartigny\n# Copyright (c) 2014 ramnes\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 dequis\n# Copyright (c) 2018 Nazar Mokrynskyi\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom libqtile.layout.base import Layout\n\nDEFAULT_FLOAT_WM_TYPES = set([\n 'utility',\n 'notification',\n 'toolbar',\n 'splash',\n 'dialog',\n])\n\nDEFAULT_FLOAT_RULES = [\n {\"role\": \"About\"},\n {\"wmclass\": \"file_progress\"},\n]\n\n\nclass Floating(Layout):\n \"\"\"\n Floating layout, which does nothing with windows but handles focus order\n \"\"\"\n defaults = [\n (\"border_focus\", \"#0000ff\", \"Border colour for the focused window.\"),\n (\"border_normal\", \"#000000\", \"Border colour for un-focused windows.\"),\n (\"border_width\", 1, \"Border width.\"),\n (\"max_border_width\", 0, \"Border width for maximize.\"),\n (\"fullscreen_border_width\", 0, \"Border width for fullscreen.\"),\n (\"name\", \"floating\", \"Name of this layout.\"),\n (\n \"auto_float_types\",\n DEFAULT_FLOAT_WM_TYPES,\n \"default wm types to automatically float\"\n ),\n ]\n\n def __init__(self, float_rules=None, no_reposition_match=None, **config):\n \"\"\"\n If you have certain apps that you always want to float you can provide\n ``float_rules`` to do so. ``float_rules`` is a list of\n dictionaries containing some or all of the keys::\n\n {'wname': WM_NAME, 'wmclass': WM_CLASS, 'role': WM_WINDOW_ROLE}\n\n The keys must be specified as above. You only need one, but\n you need to provide the value for it. When a new window is\n opened it's ``match`` method is called with each of these\n rules. If one matches, the window will float. The following\n will float gimp and skype::\n\n float_rules=[dict(wmclass=\"skype\"), dict(wmclass=\"gimp\")]\n\n Specify these in the ``floating_layout`` in your config.\n\n Floating layout will try to center most of floating windows by default,\n but if you don't want this to happen for certain windows that are\n centered by mistake, you can use ``no_reposition_match`` option to\n specify them and layout will rely on windows to position themselves in\n correct location on the screen.\n \"\"\"\n Layout.__init__(self, **config)\n self.clients = []\n self.focused = None\n self.group = None\n self.float_rules = float_rules or DEFAULT_FLOAT_RULES\n self.no_reposition_match = no_reposition_match\n self.add_defaults(Floating.defaults)\n\n def match(self, win):\n \"\"\"Used to default float some windows\"\"\"\n if win.window.get_wm_type() in self.auto_float_types:\n return True\n for rule_dict in self.float_rules:\n if win.match(**rule_dict):\n return True\n return False\n\n def find_clients(self, group):\n \"\"\"Find all clients belonging to a given group\"\"\"\n return [c for c in self.clients if c.group is group]\n\n def to_screen(self, group, new_screen):\n \"\"\"Adjust offsets of clients within current screen\"\"\"\n for win in self.find_clients(group):\n if win.maximized:\n win.maximized = True\n elif win.fullscreen:\n win.fullscreen = True\n else:\n # catch if the client hasn't been configured\n try:\n # By default, place window at same offset from top corner\n new_x = new_screen.x + win.float_x\n new_y = new_screen.y + win.float_y\n except AttributeError:\n # this will be handled in .configure()\n pass\n else:\n # make sure window isn't off screen left/right...\n new_x = min(new_x, new_screen.x + new_screen.width - win.width)\n new_x = max(new_x, new_screen.x)\n # and up/down\n new_y = min(new_y, 
new_screen.y + new_screen.height - win.height)\n new_y = max(new_y, new_screen.y)\n\n win.x = new_x\n win.y = new_y\n win.group = new_screen.group\n\n def focus_first(self, group=None):\n if group is None:\n clients = self.clients\n else:\n clients = self.find_clients(group)\n\n if clients:\n return clients[0]\n\n def focus_next(self, win):\n if win not in self.clients or win.group is None:\n return\n\n clients = self.find_clients(win.group)\n idx = clients.index(win)\n if len(clients) > idx + 1:\n return clients[idx + 1]\n\n def focus_last(self, group=None):\n if group is None:\n clients = self.clients\n else:\n clients = self.find_clients(group)\n\n if clients:\n return clients[-1]\n\n def focus_previous(self, win):\n if win not in self.clients or win.group is None:\n return\n\n clients = self.find_clients(win.group)\n idx = clients.index(win)\n if idx > 0:\n return clients[idx - 1]\n\n def focus(self, client):\n self.focused = client\n\n def blur(self):\n self.focused = None\n\n def compute_client_position(self, client, screen):\n \"\"\" recompute client.x and client.y, returning whether or not to place\n this client above other windows or not \"\"\"\n above = False\n transient_for = client.window.get_wm_transient_for()\n win = client.group.qtile.windows_map.get(transient_for)\n if win is not None:\n # if transient for a window, place in the center of the window\n center_x = win.x + win.width / 2\n center_y = win.y + win.height / 2\n else:\n center_x = screen.x + screen.width / 2\n center_y = screen.y + screen.height / 2\n above = True\n\n x = center_x - client.width / 2\n y = center_y - client.height / 2\n\n # don't go off the right...\n x = min(x, screen.x + screen.width)\n # or left...\n x = max(x, screen.x)\n # or bottom...\n y = min(y, screen.y + screen.height)\n # or top\n y = max(y, screen.y)\n\n client.x = int(round(x))\n client.y = int(round(y))\n return above\n\n def configure(self, client, screen):\n # After this, the client will be mapped. Either this will do it, or the\n # client has already done it.\n client.hidden = False\n\n # 'sun-awt-X11-XWindowPeer' is a dropdown used in Java application,\n # don't reposition it anywhere, let Java app to control it\n cls = client.window.get_wm_class() or ''\n is_java_dropdown = 'sun-awt-X11-XWindowPeer' in cls\n if is_java_dropdown:\n return\n\n # similar to above but the X11 version, the client may have already\n # placed itself. 
let's respect that\n if client.has_user_set_position():\n return\n\n # ok, it's not java and the window itself didn't position it, but users\n # may still have asked us not to mess with it\n if self.no_reposition_match is not None and self.no_reposition_match.compare(client):\n return\n\n if client.has_focus:\n bc = client.group.qtile.color_pixel(self.border_focus)\n else:\n bc = client.group.qtile.color_pixel(self.border_normal)\n if client.maximized:\n bw = self.max_border_width\n elif client.fullscreen:\n bw = self.fullscreen_border_width\n else:\n bw = self.border_width\n above = False\n\n # We definitely have a screen here, so let's be sure we'll float on screen\n try:\n client.float_x\n client.float_y\n except AttributeError:\n # this window hasn't been placed before, let's put it in a sensible spot\n above = self.compute_client_position(client, screen)\n\n client.place(\n client.x,\n client.y,\n client.width,\n client.height,\n bw,\n bc,\n above,\n )\n client.unhide()\n\n def add(self, client):\n self.clients.append(client)\n self.focused = client\n\n def remove(self, client):\n if client not in self.clients:\n return\n\n next_focus = self.focus_next(client)\n if client is self.focused:\n self.blur()\n self.clients.remove(client)\n return next_focus\n\n def info(self):\n d = Layout.info(self)\n d[\"clients\"] = [c.name for c in self.clients]\n return d\n\n def cmd_next(self):\n # This can't ever be called, but implement the abstract method\n pass\n\n def cmd_previous(self):\n # This can't ever be called, but implement the abstract method\n pass\n", "path": "libqtile/layout/floating.py"}]}
| 3,950 | 156 |
gh_patches_debug_18191
|
rasdani/github-patches
|
git_diff
|
python-discord__bot-997
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Charinfo doesn't escape backticks
The Unicode escapes are formatted in code blocks. When the embed tries to also render a literal backtick, this ends up interfering with the code blocks and creating a mess.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/cogs/utils.py`
Content:
```
1 import difflib
2 import logging
3 import re
4 import unicodedata
5 from email.parser import HeaderParser
6 from io import StringIO
7 from typing import Tuple, Union
8
9 from discord import Colour, Embed
10 from discord.ext.commands import BadArgument, Cog, Context, command
11
12 from bot.bot import Bot
13 from bot.constants import Channels, MODERATION_ROLES, STAFF_ROLES
14 from bot.decorators import in_whitelist, with_role
15
16 log = logging.getLogger(__name__)
17
18 ZEN_OF_PYTHON = """\
19 Beautiful is better than ugly.
20 Explicit is better than implicit.
21 Simple is better than complex.
22 Complex is better than complicated.
23 Flat is better than nested.
24 Sparse is better than dense.
25 Readability counts.
26 Special cases aren't special enough to break the rules.
27 Although practicality beats purity.
28 Errors should never pass silently.
29 Unless explicitly silenced.
30 In the face of ambiguity, refuse the temptation to guess.
31 There should be one-- and preferably only one --obvious way to do it.
32 Although that way may not be obvious at first unless you're Dutch.
33 Now is better than never.
34 Although never is often better than *right* now.
35 If the implementation is hard to explain, it's a bad idea.
36 If the implementation is easy to explain, it may be a good idea.
37 Namespaces are one honking great idea -- let's do more of those!
38 """
39
40 ICON_URL = "https://www.python.org/static/opengraph-icon-200x200.png"
41
42
43 class Utils(Cog):
44 """A selection of utilities which don't have a clear category."""
45
46 def __init__(self, bot: Bot):
47 self.bot = bot
48
49 self.base_pep_url = "http://www.python.org/dev/peps/pep-"
50 self.base_github_pep_url = "https://raw.githubusercontent.com/python/peps/master/pep-"
51
52 @command(name='pep', aliases=('get_pep', 'p'))
53 async def pep_command(self, ctx: Context, pep_number: str) -> None:
54 """Fetches information about a PEP and sends it to the channel."""
55 if pep_number.isdigit():
56 pep_number = int(pep_number)
57 else:
58 await ctx.send_help(ctx.command)
59 return
60
61 # Handle PEP 0 directly because it's not in .rst or .txt so it can't be accessed like other PEPs.
62 if pep_number == 0:
63 return await self.send_pep_zero(ctx)
64
65 possible_extensions = ['.txt', '.rst']
66 found_pep = False
67 for extension in possible_extensions:
68 # Attempt to fetch the PEP
69 pep_url = f"{self.base_github_pep_url}{pep_number:04}{extension}"
70 log.trace(f"Requesting PEP {pep_number} with {pep_url}")
71 response = await self.bot.http_session.get(pep_url)
72
73 if response.status == 200:
74 log.trace("PEP found")
75 found_pep = True
76
77 pep_content = await response.text()
78
79 # Taken from https://github.com/python/peps/blob/master/pep0/pep.py#L179
80 pep_header = HeaderParser().parse(StringIO(pep_content))
81
82 # Assemble the embed
83 pep_embed = Embed(
84 title=f"**PEP {pep_number} - {pep_header['Title']}**",
85 description=f"[Link]({self.base_pep_url}{pep_number:04})",
86 )
87
88 pep_embed.set_thumbnail(url=ICON_URL)
89
90 # Add the interesting information
91 fields_to_check = ("Status", "Python-Version", "Created", "Type")
92 for field in fields_to_check:
93 # Check for a PEP metadata field that is present but has an empty value
94 # embed field values can't contain an empty string
95 if pep_header.get(field, ""):
96 pep_embed.add_field(name=field, value=pep_header[field])
97
98 elif response.status != 404:
99 # any response except 200 and 404 is expected
100 found_pep = True # actually not, but it's easier to display this way
101 log.trace(f"The user requested PEP {pep_number}, but the response had an unexpected status code: "
102 f"{response.status}.\n{response.text}")
103
104 error_message = "Unexpected HTTP error during PEP search. Please let us know."
105 pep_embed = Embed(title="Unexpected error", description=error_message)
106 pep_embed.colour = Colour.red()
107 break
108
109 if not found_pep:
110 log.trace("PEP was not found")
111 not_found = f"PEP {pep_number} does not exist."
112 pep_embed = Embed(title="PEP not found", description=not_found)
113 pep_embed.colour = Colour.red()
114
115 await ctx.message.channel.send(embed=pep_embed)
116
117 @command()
118 @in_whitelist(channels=(Channels.bot_commands,), roles=STAFF_ROLES)
119 async def charinfo(self, ctx: Context, *, characters: str) -> None:
120 """Shows you information on up to 25 unicode characters."""
121 match = re.match(r"<(a?):(\w+):(\d+)>", characters)
122 if match:
123 embed = Embed(
124 title="Non-Character Detected",
125 description=(
126 "Only unicode characters can be processed, but a custom Discord emoji "
127 "was found. Please remove it and try again."
128 )
129 )
130 embed.colour = Colour.red()
131 await ctx.send(embed=embed)
132 return
133
134 if len(characters) > 25:
135 embed = Embed(title=f"Too many characters ({len(characters)}/25)")
136 embed.colour = Colour.red()
137 await ctx.send(embed=embed)
138 return
139
140 def get_info(char: str) -> Tuple[str, str]:
141 digit = f"{ord(char):x}"
142 if len(digit) <= 4:
143 u_code = f"\\u{digit:>04}"
144 else:
145 u_code = f"\\U{digit:>08}"
146 url = f"https://www.compart.com/en/unicode/U+{digit:>04}"
147 name = f"[{unicodedata.name(char, '')}]({url})"
148 info = f"`{u_code.ljust(10)}`: {name} - {char}"
149 return info, u_code
150
151 charlist, rawlist = zip(*(get_info(c) for c in characters))
152
153 embed = Embed(description="\n".join(charlist))
154 embed.set_author(name="Character Info")
155
156 if len(characters) > 1:
157 embed.add_field(name='Raw', value=f"`{''.join(rawlist)}`", inline=False)
158
159 await ctx.send(embed=embed)
160
161 @command()
162 async def zen(self, ctx: Context, *, search_value: Union[int, str, None] = None) -> None:
163 """
164 Show the Zen of Python.
165
166 Without any arguments, the full Zen will be produced.
167 If an integer is provided, the line with that index will be produced.
168 If a string is provided, the line which matches best will be produced.
169 """
170 embed = Embed(
171 colour=Colour.blurple(),
172 title="The Zen of Python",
173 description=ZEN_OF_PYTHON
174 )
175
176 if search_value is None:
177 embed.title += ", by Tim Peters"
178 await ctx.send(embed=embed)
179 return
180
181 zen_lines = ZEN_OF_PYTHON.splitlines()
182
183 # handle if it's an index int
184 if isinstance(search_value, int):
185 upper_bound = len(zen_lines) - 1
186 lower_bound = -1 * upper_bound
187 if not (lower_bound <= search_value <= upper_bound):
188 raise BadArgument(f"Please provide an index between {lower_bound} and {upper_bound}.")
189
190 embed.title += f" (line {search_value % len(zen_lines)}):"
191 embed.description = zen_lines[search_value]
192 await ctx.send(embed=embed)
193 return
194
195 # Try to handle first exact word due difflib.SequenceMatched may use some other similar word instead
196 # exact word.
197 for i, line in enumerate(zen_lines):
198 for word in line.split():
199 if word.lower() == search_value.lower():
200 embed.title += f" (line {i}):"
201 embed.description = line
202 await ctx.send(embed=embed)
203 return
204
205 # handle if it's a search string and not exact word
206 matcher = difflib.SequenceMatcher(None, search_value.lower())
207
208 best_match = ""
209 match_index = 0
210 best_ratio = 0
211
212 for index, line in enumerate(zen_lines):
213 matcher.set_seq2(line.lower())
214
215 # the match ratio needs to be adjusted because, naturally,
216 # longer lines will have worse ratios than shorter lines when
217 # fuzzy searching for keywords. this seems to work okay.
218 adjusted_ratio = (len(line) - 5) ** 0.5 * matcher.ratio()
219
220 if adjusted_ratio > best_ratio:
221 best_ratio = adjusted_ratio
222 best_match = line
223 match_index = index
224
225 if not best_match:
226 raise BadArgument("I didn't get a match! Please try again with a different search term.")
227
228 embed.title += f" (line {match_index}):"
229 embed.description = best_match
230 await ctx.send(embed=embed)
231
232 @command(aliases=("poll",))
233 @with_role(*MODERATION_ROLES)
234 async def vote(self, ctx: Context, title: str, *options: str) -> None:
235 """
236 Build a quick voting poll with matching reactions with the provided options.
237
238 A maximum of 20 options can be provided, as Discord supports a max of 20
239 reactions on a single message.
240 """
241 if len(options) < 2:
242 raise BadArgument("Please provide at least 2 options.")
243 if len(options) > 20:
244 raise BadArgument("I can only handle 20 options!")
245
246 codepoint_start = 127462 # represents "regional_indicator_a" unicode value
247 options = {chr(i): f"{chr(i)} - {v}" for i, v in enumerate(options, start=codepoint_start)}
248 embed = Embed(title=title, description="\n".join(options.values()))
249 message = await ctx.send(embed=embed)
250 for reaction in options:
251 await message.add_reaction(reaction)
252
253 async def send_pep_zero(self, ctx: Context) -> None:
254 """Send information about PEP 0."""
255 pep_embed = Embed(
256 title="**PEP 0 - Index of Python Enhancement Proposals (PEPs)**",
257 description="[Link](https://www.python.org/dev/peps/)"
258 )
259 pep_embed.set_thumbnail(url=ICON_URL)
260 pep_embed.add_field(name="Status", value="Active")
261 pep_embed.add_field(name="Created", value="13-Jul-2000")
262 pep_embed.add_field(name="Type", value="Informational")
263
264 await ctx.send(embed=pep_embed)
265
266
267 def setup(bot: Bot) -> None:
268 """Load the Utils cog."""
269 bot.add_cog(Utils(bot))
270
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bot/cogs/utils.py b/bot/cogs/utils.py
--- a/bot/cogs/utils.py
+++ b/bot/cogs/utils.py
@@ -6,7 +6,7 @@
from io import StringIO
from typing import Tuple, Union
-from discord import Colour, Embed
+from discord import Colour, Embed, utils
from discord.ext.commands import BadArgument, Cog, Context, command
from bot.bot import Bot
@@ -145,7 +145,7 @@
u_code = f"\\U{digit:>08}"
url = f"https://www.compart.com/en/unicode/U+{digit:>04}"
name = f"[{unicodedata.name(char, '')}]({url})"
- info = f"`{u_code.ljust(10)}`: {name} - {char}"
+ info = f"`{u_code.ljust(10)}`: {name} - {utils.escape_markdown(char)}"
return info, u_code
charlist, rawlist = zip(*(get_info(c) for c in characters))
|
{"golden_diff": "diff --git a/bot/cogs/utils.py b/bot/cogs/utils.py\n--- a/bot/cogs/utils.py\n+++ b/bot/cogs/utils.py\n@@ -6,7 +6,7 @@\n from io import StringIO\n from typing import Tuple, Union\n \n-from discord import Colour, Embed\n+from discord import Colour, Embed, utils\n from discord.ext.commands import BadArgument, Cog, Context, command\n \n from bot.bot import Bot\n@@ -145,7 +145,7 @@\n u_code = f\"\\\\U{digit:>08}\"\n url = f\"https://www.compart.com/en/unicode/U+{digit:>04}\"\n name = f\"[{unicodedata.name(char, '')}]({url})\"\n- info = f\"`{u_code.ljust(10)}`: {name} - {char}\"\n+ info = f\"`{u_code.ljust(10)}`: {name} - {utils.escape_markdown(char)}\"\n return info, u_code\n \n charlist, rawlist = zip(*(get_info(c) for c in characters))\n", "issue": "Charinfo doesn't escape backticks\nThe Unicode escapes are formatted in code blocks. When the embed tries to also render a literal backtick, this ends up interfering with the code blocks and creating a mess.\r\n\r\n\r\n\n", "before_files": [{"content": "import difflib\nimport logging\nimport re\nimport unicodedata\nfrom email.parser import HeaderParser\nfrom io import StringIO\nfrom typing import Tuple, Union\n\nfrom discord import Colour, Embed\nfrom discord.ext.commands import BadArgument, Cog, Context, command\n\nfrom bot.bot import Bot\nfrom bot.constants import Channels, MODERATION_ROLES, STAFF_ROLES\nfrom bot.decorators import in_whitelist, with_role\n\nlog = logging.getLogger(__name__)\n\nZEN_OF_PYTHON = \"\"\"\\\nBeautiful is better than ugly.\nExplicit is better than implicit.\nSimple is better than complex.\nComplex is better than complicated.\nFlat is better than nested.\nSparse is better than dense.\nReadability counts.\nSpecial cases aren't special enough to break the rules.\nAlthough practicality beats purity.\nErrors should never pass silently.\nUnless explicitly silenced.\nIn the face of ambiguity, refuse the temptation to guess.\nThere should be one-- and preferably only one --obvious way to do it.\nAlthough that way may not be obvious at first unless you're Dutch.\nNow is better than never.\nAlthough never is often better than *right* now.\nIf the implementation is hard to explain, it's a bad idea.\nIf the implementation is easy to explain, it may be a good idea.\nNamespaces are one honking great idea -- let's do more of those!\n\"\"\"\n\nICON_URL = \"https://www.python.org/static/opengraph-icon-200x200.png\"\n\n\nclass Utils(Cog):\n \"\"\"A selection of utilities which don't have a clear category.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n self.base_pep_url = \"http://www.python.org/dev/peps/pep-\"\n self.base_github_pep_url = \"https://raw.githubusercontent.com/python/peps/master/pep-\"\n\n @command(name='pep', aliases=('get_pep', 'p'))\n async def pep_command(self, ctx: Context, pep_number: str) -> None:\n \"\"\"Fetches information about a PEP and sends it to the channel.\"\"\"\n if pep_number.isdigit():\n pep_number = int(pep_number)\n else:\n await ctx.send_help(ctx.command)\n return\n\n # Handle PEP 0 directly because it's not in .rst or .txt so it can't be accessed like other PEPs.\n if pep_number == 0:\n return await self.send_pep_zero(ctx)\n\n possible_extensions = ['.txt', '.rst']\n found_pep = False\n for extension in possible_extensions:\n # Attempt to fetch the PEP\n pep_url = f\"{self.base_github_pep_url}{pep_number:04}{extension}\"\n log.trace(f\"Requesting PEP {pep_number} with {pep_url}\")\n response = await self.bot.http_session.get(pep_url)\n\n if response.status == 200:\n 
log.trace(\"PEP found\")\n found_pep = True\n\n pep_content = await response.text()\n\n # Taken from https://github.com/python/peps/blob/master/pep0/pep.py#L179\n pep_header = HeaderParser().parse(StringIO(pep_content))\n\n # Assemble the embed\n pep_embed = Embed(\n title=f\"**PEP {pep_number} - {pep_header['Title']}**\",\n description=f\"[Link]({self.base_pep_url}{pep_number:04})\",\n )\n\n pep_embed.set_thumbnail(url=ICON_URL)\n\n # Add the interesting information\n fields_to_check = (\"Status\", \"Python-Version\", \"Created\", \"Type\")\n for field in fields_to_check:\n # Check for a PEP metadata field that is present but has an empty value\n # embed field values can't contain an empty string\n if pep_header.get(field, \"\"):\n pep_embed.add_field(name=field, value=pep_header[field])\n\n elif response.status != 404:\n # any response except 200 and 404 is expected\n found_pep = True # actually not, but it's easier to display this way\n log.trace(f\"The user requested PEP {pep_number}, but the response had an unexpected status code: \"\n f\"{response.status}.\\n{response.text}\")\n\n error_message = \"Unexpected HTTP error during PEP search. Please let us know.\"\n pep_embed = Embed(title=\"Unexpected error\", description=error_message)\n pep_embed.colour = Colour.red()\n break\n\n if not found_pep:\n log.trace(\"PEP was not found\")\n not_found = f\"PEP {pep_number} does not exist.\"\n pep_embed = Embed(title=\"PEP not found\", description=not_found)\n pep_embed.colour = Colour.red()\n\n await ctx.message.channel.send(embed=pep_embed)\n\n @command()\n @in_whitelist(channels=(Channels.bot_commands,), roles=STAFF_ROLES)\n async def charinfo(self, ctx: Context, *, characters: str) -> None:\n \"\"\"Shows you information on up to 25 unicode characters.\"\"\"\n match = re.match(r\"<(a?):(\\w+):(\\d+)>\", characters)\n if match:\n embed = Embed(\n title=\"Non-Character Detected\",\n description=(\n \"Only unicode characters can be processed, but a custom Discord emoji \"\n \"was found. 
Please remove it and try again.\"\n )\n )\n embed.colour = Colour.red()\n await ctx.send(embed=embed)\n return\n\n if len(characters) > 25:\n embed = Embed(title=f\"Too many characters ({len(characters)}/25)\")\n embed.colour = Colour.red()\n await ctx.send(embed=embed)\n return\n\n def get_info(char: str) -> Tuple[str, str]:\n digit = f\"{ord(char):x}\"\n if len(digit) <= 4:\n u_code = f\"\\\\u{digit:>04}\"\n else:\n u_code = f\"\\\\U{digit:>08}\"\n url = f\"https://www.compart.com/en/unicode/U+{digit:>04}\"\n name = f\"[{unicodedata.name(char, '')}]({url})\"\n info = f\"`{u_code.ljust(10)}`: {name} - {char}\"\n return info, u_code\n\n charlist, rawlist = zip(*(get_info(c) for c in characters))\n\n embed = Embed(description=\"\\n\".join(charlist))\n embed.set_author(name=\"Character Info\")\n\n if len(characters) > 1:\n embed.add_field(name='Raw', value=f\"`{''.join(rawlist)}`\", inline=False)\n\n await ctx.send(embed=embed)\n\n @command()\n async def zen(self, ctx: Context, *, search_value: Union[int, str, None] = None) -> None:\n \"\"\"\n Show the Zen of Python.\n\n Without any arguments, the full Zen will be produced.\n If an integer is provided, the line with that index will be produced.\n If a string is provided, the line which matches best will be produced.\n \"\"\"\n embed = Embed(\n colour=Colour.blurple(),\n title=\"The Zen of Python\",\n description=ZEN_OF_PYTHON\n )\n\n if search_value is None:\n embed.title += \", by Tim Peters\"\n await ctx.send(embed=embed)\n return\n\n zen_lines = ZEN_OF_PYTHON.splitlines()\n\n # handle if it's an index int\n if isinstance(search_value, int):\n upper_bound = len(zen_lines) - 1\n lower_bound = -1 * upper_bound\n if not (lower_bound <= search_value <= upper_bound):\n raise BadArgument(f\"Please provide an index between {lower_bound} and {upper_bound}.\")\n\n embed.title += f\" (line {search_value % len(zen_lines)}):\"\n embed.description = zen_lines[search_value]\n await ctx.send(embed=embed)\n return\n\n # Try to handle first exact word due difflib.SequenceMatched may use some other similar word instead\n # exact word.\n for i, line in enumerate(zen_lines):\n for word in line.split():\n if word.lower() == search_value.lower():\n embed.title += f\" (line {i}):\"\n embed.description = line\n await ctx.send(embed=embed)\n return\n\n # handle if it's a search string and not exact word\n matcher = difflib.SequenceMatcher(None, search_value.lower())\n\n best_match = \"\"\n match_index = 0\n best_ratio = 0\n\n for index, line in enumerate(zen_lines):\n matcher.set_seq2(line.lower())\n\n # the match ratio needs to be adjusted because, naturally,\n # longer lines will have worse ratios than shorter lines when\n # fuzzy searching for keywords. this seems to work okay.\n adjusted_ratio = (len(line) - 5) ** 0.5 * matcher.ratio()\n\n if adjusted_ratio > best_ratio:\n best_ratio = adjusted_ratio\n best_match = line\n match_index = index\n\n if not best_match:\n raise BadArgument(\"I didn't get a match! 
Please try again with a different search term.\")\n\n embed.title += f\" (line {match_index}):\"\n embed.description = best_match\n await ctx.send(embed=embed)\n\n @command(aliases=(\"poll\",))\n @with_role(*MODERATION_ROLES)\n async def vote(self, ctx: Context, title: str, *options: str) -> None:\n \"\"\"\n Build a quick voting poll with matching reactions with the provided options.\n\n A maximum of 20 options can be provided, as Discord supports a max of 20\n reactions on a single message.\n \"\"\"\n if len(options) < 2:\n raise BadArgument(\"Please provide at least 2 options.\")\n if len(options) > 20:\n raise BadArgument(\"I can only handle 20 options!\")\n\n codepoint_start = 127462 # represents \"regional_indicator_a\" unicode value\n options = {chr(i): f\"{chr(i)} - {v}\" for i, v in enumerate(options, start=codepoint_start)}\n embed = Embed(title=title, description=\"\\n\".join(options.values()))\n message = await ctx.send(embed=embed)\n for reaction in options:\n await message.add_reaction(reaction)\n\n async def send_pep_zero(self, ctx: Context) -> None:\n \"\"\"Send information about PEP 0.\"\"\"\n pep_embed = Embed(\n title=\"**PEP 0 - Index of Python Enhancement Proposals (PEPs)**\",\n description=\"[Link](https://www.python.org/dev/peps/)\"\n )\n pep_embed.set_thumbnail(url=ICON_URL)\n pep_embed.add_field(name=\"Status\", value=\"Active\")\n pep_embed.add_field(name=\"Created\", value=\"13-Jul-2000\")\n pep_embed.add_field(name=\"Type\", value=\"Informational\")\n\n await ctx.send(embed=pep_embed)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Utils cog.\"\"\"\n bot.add_cog(Utils(bot))\n", "path": "bot/cogs/utils.py"}], "after_files": [{"content": "import difflib\nimport logging\nimport re\nimport unicodedata\nfrom email.parser import HeaderParser\nfrom io import StringIO\nfrom typing import Tuple, Union\n\nfrom discord import Colour, Embed, utils\nfrom discord.ext.commands import BadArgument, Cog, Context, command\n\nfrom bot.bot import Bot\nfrom bot.constants import Channels, MODERATION_ROLES, STAFF_ROLES\nfrom bot.decorators import in_whitelist, with_role\n\nlog = logging.getLogger(__name__)\n\nZEN_OF_PYTHON = \"\"\"\\\nBeautiful is better than ugly.\nExplicit is better than implicit.\nSimple is better than complex.\nComplex is better than complicated.\nFlat is better than nested.\nSparse is better than dense.\nReadability counts.\nSpecial cases aren't special enough to break the rules.\nAlthough practicality beats purity.\nErrors should never pass silently.\nUnless explicitly silenced.\nIn the face of ambiguity, refuse the temptation to guess.\nThere should be one-- and preferably only one --obvious way to do it.\nAlthough that way may not be obvious at first unless you're Dutch.\nNow is better than never.\nAlthough never is often better than *right* now.\nIf the implementation is hard to explain, it's a bad idea.\nIf the implementation is easy to explain, it may be a good idea.\nNamespaces are one honking great idea -- let's do more of those!\n\"\"\"\n\nICON_URL = \"https://www.python.org/static/opengraph-icon-200x200.png\"\n\n\nclass Utils(Cog):\n \"\"\"A selection of utilities which don't have a clear category.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n self.base_pep_url = \"http://www.python.org/dev/peps/pep-\"\n self.base_github_pep_url = \"https://raw.githubusercontent.com/python/peps/master/pep-\"\n\n @command(name='pep', aliases=('get_pep', 'p'))\n async def pep_command(self, ctx: Context, pep_number: str) -> None:\n \"\"\"Fetches information 
about a PEP and sends it to the channel.\"\"\"\n if pep_number.isdigit():\n pep_number = int(pep_number)\n else:\n await ctx.send_help(ctx.command)\n return\n\n # Handle PEP 0 directly because it's not in .rst or .txt so it can't be accessed like other PEPs.\n if pep_number == 0:\n return await self.send_pep_zero(ctx)\n\n possible_extensions = ['.txt', '.rst']\n found_pep = False\n for extension in possible_extensions:\n # Attempt to fetch the PEP\n pep_url = f\"{self.base_github_pep_url}{pep_number:04}{extension}\"\n log.trace(f\"Requesting PEP {pep_number} with {pep_url}\")\n response = await self.bot.http_session.get(pep_url)\n\n if response.status == 200:\n log.trace(\"PEP found\")\n found_pep = True\n\n pep_content = await response.text()\n\n # Taken from https://github.com/python/peps/blob/master/pep0/pep.py#L179\n pep_header = HeaderParser().parse(StringIO(pep_content))\n\n # Assemble the embed\n pep_embed = Embed(\n title=f\"**PEP {pep_number} - {pep_header['Title']}**\",\n description=f\"[Link]({self.base_pep_url}{pep_number:04})\",\n )\n\n pep_embed.set_thumbnail(url=ICON_URL)\n\n # Add the interesting information\n fields_to_check = (\"Status\", \"Python-Version\", \"Created\", \"Type\")\n for field in fields_to_check:\n # Check for a PEP metadata field that is present but has an empty value\n # embed field values can't contain an empty string\n if pep_header.get(field, \"\"):\n pep_embed.add_field(name=field, value=pep_header[field])\n\n elif response.status != 404:\n # any response except 200 and 404 is expected\n found_pep = True # actually not, but it's easier to display this way\n log.trace(f\"The user requested PEP {pep_number}, but the response had an unexpected status code: \"\n f\"{response.status}.\\n{response.text}\")\n\n error_message = \"Unexpected HTTP error during PEP search. Please let us know.\"\n pep_embed = Embed(title=\"Unexpected error\", description=error_message)\n pep_embed.colour = Colour.red()\n break\n\n if not found_pep:\n log.trace(\"PEP was not found\")\n not_found = f\"PEP {pep_number} does not exist.\"\n pep_embed = Embed(title=\"PEP not found\", description=not_found)\n pep_embed.colour = Colour.red()\n\n await ctx.message.channel.send(embed=pep_embed)\n\n @command()\n @in_whitelist(channels=(Channels.bot_commands,), roles=STAFF_ROLES)\n async def charinfo(self, ctx: Context, *, characters: str) -> None:\n \"\"\"Shows you information on up to 25 unicode characters.\"\"\"\n match = re.match(r\"<(a?):(\\w+):(\\d+)>\", characters)\n if match:\n embed = Embed(\n title=\"Non-Character Detected\",\n description=(\n \"Only unicode characters can be processed, but a custom Discord emoji \"\n \"was found. 
Please remove it and try again.\"\n )\n )\n embed.colour = Colour.red()\n await ctx.send(embed=embed)\n return\n\n if len(characters) > 25:\n embed = Embed(title=f\"Too many characters ({len(characters)}/25)\")\n embed.colour = Colour.red()\n await ctx.send(embed=embed)\n return\n\n def get_info(char: str) -> Tuple[str, str]:\n digit = f\"{ord(char):x}\"\n if len(digit) <= 4:\n u_code = f\"\\\\u{digit:>04}\"\n else:\n u_code = f\"\\\\U{digit:>08}\"\n url = f\"https://www.compart.com/en/unicode/U+{digit:>04}\"\n name = f\"[{unicodedata.name(char, '')}]({url})\"\n info = f\"`{u_code.ljust(10)}`: {name} - {utils.escape_markdown(char)}\"\n return info, u_code\n\n charlist, rawlist = zip(*(get_info(c) for c in characters))\n\n embed = Embed(description=\"\\n\".join(charlist))\n embed.set_author(name=\"Character Info\")\n\n if len(characters) > 1:\n embed.add_field(name='Raw', value=f\"`{''.join(rawlist)}`\", inline=False)\n\n await ctx.send(embed=embed)\n\n @command()\n async def zen(self, ctx: Context, *, search_value: Union[int, str, None] = None) -> None:\n \"\"\"\n Show the Zen of Python.\n\n Without any arguments, the full Zen will be produced.\n If an integer is provided, the line with that index will be produced.\n If a string is provided, the line which matches best will be produced.\n \"\"\"\n embed = Embed(\n colour=Colour.blurple(),\n title=\"The Zen of Python\",\n description=ZEN_OF_PYTHON\n )\n\n if search_value is None:\n embed.title += \", by Tim Peters\"\n await ctx.send(embed=embed)\n return\n\n zen_lines = ZEN_OF_PYTHON.splitlines()\n\n # handle if it's an index int\n if isinstance(search_value, int):\n upper_bound = len(zen_lines) - 1\n lower_bound = -1 * upper_bound\n if not (lower_bound <= search_value <= upper_bound):\n raise BadArgument(f\"Please provide an index between {lower_bound} and {upper_bound}.\")\n\n embed.title += f\" (line {search_value % len(zen_lines)}):\"\n embed.description = zen_lines[search_value]\n await ctx.send(embed=embed)\n return\n\n # Try to handle first exact word due difflib.SequenceMatched may use some other similar word instead\n # exact word.\n for i, line in enumerate(zen_lines):\n for word in line.split():\n if word.lower() == search_value.lower():\n embed.title += f\" (line {i}):\"\n embed.description = line\n await ctx.send(embed=embed)\n return\n\n # handle if it's a search string and not exact word\n matcher = difflib.SequenceMatcher(None, search_value.lower())\n\n best_match = \"\"\n match_index = 0\n best_ratio = 0\n\n for index, line in enumerate(zen_lines):\n matcher.set_seq2(line.lower())\n\n # the match ratio needs to be adjusted because, naturally,\n # longer lines will have worse ratios than shorter lines when\n # fuzzy searching for keywords. this seems to work okay.\n adjusted_ratio = (len(line) - 5) ** 0.5 * matcher.ratio()\n\n if adjusted_ratio > best_ratio:\n best_ratio = adjusted_ratio\n best_match = line\n match_index = index\n\n if not best_match:\n raise BadArgument(\"I didn't get a match! 
Please try again with a different search term.\")\n\n embed.title += f\" (line {match_index}):\"\n embed.description = best_match\n await ctx.send(embed=embed)\n\n @command(aliases=(\"poll\",))\n @with_role(*MODERATION_ROLES)\n async def vote(self, ctx: Context, title: str, *options: str) -> None:\n \"\"\"\n Build a quick voting poll with matching reactions with the provided options.\n\n A maximum of 20 options can be provided, as Discord supports a max of 20\n reactions on a single message.\n \"\"\"\n if len(options) < 2:\n raise BadArgument(\"Please provide at least 2 options.\")\n if len(options) > 20:\n raise BadArgument(\"I can only handle 20 options!\")\n\n codepoint_start = 127462 # represents \"regional_indicator_a\" unicode value\n options = {chr(i): f\"{chr(i)} - {v}\" for i, v in enumerate(options, start=codepoint_start)}\n embed = Embed(title=title, description=\"\\n\".join(options.values()))\n message = await ctx.send(embed=embed)\n for reaction in options:\n await message.add_reaction(reaction)\n\n async def send_pep_zero(self, ctx: Context) -> None:\n \"\"\"Send information about PEP 0.\"\"\"\n pep_embed = Embed(\n title=\"**PEP 0 - Index of Python Enhancement Proposals (PEPs)**\",\n description=\"[Link](https://www.python.org/dev/peps/)\"\n )\n pep_embed.set_thumbnail(url=ICON_URL)\n pep_embed.add_field(name=\"Status\", value=\"Active\")\n pep_embed.add_field(name=\"Created\", value=\"13-Jul-2000\")\n pep_embed.add_field(name=\"Type\", value=\"Informational\")\n\n await ctx.send(embed=pep_embed)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Utils cog.\"\"\"\n bot.add_cog(Utils(bot))\n", "path": "bot/cogs/utils.py"}]}
| 3,514 | 239 |
gh_patches_debug_18828
|
rasdani/github-patches
|
git_diff
|
platformsh__platformsh-docs-2079
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add keywords for search
### Where on docs.platform.sh should be changed?
/configuration/app/app-reference.html
### What exactly should be updated?
We'd like specific pages to be findable by searching for specific words. For example, the app reference should come up when searching for `.platform.app.yaml` (this may also involve a problem with escaping characters like `.`). Add keywords or similar to make these pages findable.
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `search/main.py`
Content:
```
1 import os
2 import glob
3 import json
4 import meilisearch
5 from platformshconfig import Config
6
7 class Search:
8 def __init__(self):
9 self.default = {
10 "host": "http://127.0.0.1",
11 "key": None,
12 "port": 7700
13 }
14
15 self.scrape_dir = "output"
16 self.scrape_config = "config/scrape.json"
17 self.docs_index = "docs"
18 self.primaryKey = "documentId"
19 self.index_name = "Docs"
20
21 # Below are Platform.sh custom settings for how the search engine functions.
22
23 # Data available to the dropdown React app in docs, used to fill out autocomplete results.
24 self.displayed_attributes = ['title', 'text', 'url', 'site', 'section']
25 # Data actually searchable by our queries.
26 self.searchable_attributes = ['title', 'pageUrl', 'section', 'url', 'text']
27
28 # Show results for one query with the listed pages, when they by default would not show up as best results.
29 # Note: these aren't automatically two-way, which is why they're all defined twice.
30 self.synonyms = {
31 "cron": ["crons"],
32 "crons": ["cron tasks", "cron jobs"],
33 "e-mail": ["email"],
34 "routes.yaml": ["routes"],
35 "routes": ["routes.yaml"],
36 "services": ["services.yaml"],
37 "services.yaml": ["services"],
38 "application": [".platform.app.yaml", "app.yaml", "applications.yaml"],
39 ".platform.app.yaml": ["application"],
40 "app.yaml": ["application"],
41 "applications.yaml": ["application", "multi-app"],
42 "multi-app": ["applications.yaml"],
43 "regions": ["public ip addresses"],
44 "public ip addresses": ["regions"],
45 "ssl": ["https", "tls"],
46 "https": ["ssl"],
47 }
48
49 # Ranking rules:
50 #
51 # - Default order: ["words", "typo", "proximity", "attribute", "sort", "exactness"]
52 #
53 # - words: number of times query is in document (greater number gets priority)
54 # - typo: fewer typos > more typos
55 # - proximity: smaller distance between multiple occurences of query in same document > larger distances
56 # - attribute: sorted according to order of importance of attributes (searchable_attributes). terms in
57 # more important attributes first.
58 # - sort: queries are sorted at query time
59 # - exactness: similarity of matched words in document with query
60
61 self.ranking_rules = ["rank:asc", "attribute", "typo", "words", "proximity", "exactness"]
62
63 self.updated_settings = {
64 "rankingRules": self.ranking_rules,
65 "searchableAttributes": self.searchable_attributes,
66 "displayedAttributes": self.displayed_attributes
67 }
68
69 # Group results by page
70 self.distinct_attribute = "pageUrl"
71
72 def getConnectionString(self):
73 """
74 Sets the Meilisearch host string, depending on the environment.
75
76 Returns:
77 string: Meilisearch host string.
78 """
79 if os.environ.get('PORT'):
80 return "{}:{}".format(self.default["host"], os.environ['PORT'])
81 else:
82 return "{}:{}".format(self.default["host"], self.default["port"])
83
84 def getMasterKey(self):
85 """
86 Retrieves the Meilisearch master key, either from the Platform.sh environment or locally.
87 """
88 config = Config()
89 if config.is_valid_platform():
90 return config.projectEntropy
91 elif os.environ.get("MEILI_MASTER_KEY"):
92 return os.environ["MEILI_MASTER_KEY"]
93 else:
94 return self.default["key"]
95
96 def add_documents(self, index):
97 """
98 Cycle through the individual site indexes in /outputs so their individual documents can be added to Meilisearch.
99 """
100 documents = [f for f in glob.glob("{}/*.json".format(self.scrape_dir))]
101 for doc in documents:
102 self.add(doc, index)
103
104 def add(self, doc, index):
105 """
106 Add an individual site's index to the Meilisearch service.
107 """
108 with open(doc) as scraped_index:
109 data = json.load(scraped_index)
110 index.add_documents(data)
111
112 def update(self):
113 """
114 Updates the Meilisearch index.
115 """
116 # Create a Meilisearch client.
117 client = meilisearch.Client(self.getConnectionString(), self.getMasterKey())
118
119 # Delete previous index
120 if len(client.get_indexes()):
121 client.get_index(self.docs_index).delete()
122
123 # Create a new index
124 index = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name})
125
126 # Add synonyms for the index
127 index.update_synonyms(self.synonyms)
128
129 # Update its settings: what can be searched, what's displayable, and how results should be ranked.
130 index.update_settings(self.updated_settings)
131
132 # Update distinct attribute.
133 index.update_distinct_attribute(self.distinct_attribute)
134
135 # Add documents to the index
136 self.add_documents(index)
137
138 if __name__ == "__main__":
139 meili = Search()
140 meili.update()
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/search/main.py b/search/main.py
--- a/search/main.py
+++ b/search/main.py
@@ -21,9 +21,9 @@
# Below are Platform.sh custom settings for how the search engine functions.
# Data available to the dropdown React app in docs, used to fill out autocomplete results.
- self.displayed_attributes = ['title', 'text', 'url', 'site', 'section']
+ self.displayed_attributes = ['keywords', 'title', 'text', 'url', 'site', 'section']
# Data actually searchable by our queries.
- self.searchable_attributes = ['title', 'pageUrl', 'section', 'url', 'text']
+ self.searchable_attributes = ['keywords', 'title', 'pageUrl', 'section', 'text', 'url']
# Show results for one query with the listed pages, when they by default would not show up as best results.
# Note: these aren't automatically two-way, which is why they're all defined twice.
|
{"golden_diff": "diff --git a/search/main.py b/search/main.py\n--- a/search/main.py\n+++ b/search/main.py\n@@ -21,9 +21,9 @@\n # Below are Platform.sh custom settings for how the search engine functions.\n \n # Data available to the dropdown React app in docs, used to fill out autocomplete results.\n- self.displayed_attributes = ['title', 'text', 'url', 'site', 'section']\n+ self.displayed_attributes = ['keywords', 'title', 'text', 'url', 'site', 'section']\n # Data actually searchable by our queries.\n- self.searchable_attributes = ['title', 'pageUrl', 'section', 'url', 'text']\n+ self.searchable_attributes = ['keywords', 'title', 'pageUrl', 'section', 'text', 'url']\n \n # Show results for one query with the listed pages, when they by default would not show up as best results.\n # Note: these aren't automatically two-way, which is why they're all defined twice.\n", "issue": "Add keywords for search\n### Where on docs.platform.sh should be changed?\n\n/configuration/app/app-reference.html\n\n### What exactly should be updated?\n\nWe'd like specific pages to be findable by searching for specific words. For example, the app reference when searching for `.platform.app.yaml` (this may also involve a problem with escaping characters like `.`). Add keywords or other to make these pages findable.\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "import os\nimport glob\nimport json\nimport meilisearch\nfrom platformshconfig import Config\n\nclass Search:\n def __init__(self):\n self.default = {\n \"host\": \"http://127.0.0.1\",\n \"key\": None,\n \"port\": 7700\n }\n\n self.scrape_dir = \"output\"\n self.scrape_config = \"config/scrape.json\"\n self.docs_index = \"docs\"\n self.primaryKey = \"documentId\"\n self.index_name = \"Docs\"\n\n # Below are Platform.sh custom settings for how the search engine functions.\n\n # Data available to the dropdown React app in docs, used to fill out autocomplete results.\n self.displayed_attributes = ['title', 'text', 'url', 'site', 'section']\n # Data actually searchable by our queries.\n self.searchable_attributes = ['title', 'pageUrl', 'section', 'url', 'text']\n\n # Show results for one query with the listed pages, when they by default would not show up as best results.\n # Note: these aren't automatically two-way, which is why they're all defined twice.\n self.synonyms = {\n \"cron\": [\"crons\"],\n \"crons\": [\"cron tasks\", \"cron jobs\"],\n \"e-mail\": [\"email\"],\n \"routes.yaml\": [\"routes\"],\n \"routes\": [\"routes.yaml\"],\n \"services\": [\"services.yaml\"],\n \"services.yaml\": [\"services\"],\n \"application\": [\".platform.app.yaml\", \"app.yaml\", \"applications.yaml\"],\n \".platform.app.yaml\": [\"application\"],\n \"app.yaml\": [\"application\"],\n \"applications.yaml\": [\"application\", \"multi-app\"],\n \"multi-app\": [\"applications.yaml\"],\n \"regions\": [\"public ip addresses\"],\n \"public ip addresses\": [\"regions\"],\n \"ssl\": [\"https\", \"tls\"],\n \"https\": [\"ssl\"],\n }\n\n # Ranking rules:\n #\n # - Default order: [\"words\", \"typo\", \"proximity\", \"attribute\", \"sort\", \"exactness\"]\n #\n # - words: number of times query is in document (greater number gets priority)\n # - typo: fewer typos > more typos\n # - proximity: smaller distance between multiple occurences of query in same document > larger distances\n # - attribute: sorted according to order of importance of attributes (searchable_attributes). 
terms in\n # more important attributes first.\n # - sort: queries are sorted at query time\n # - exactness: similarity of matched words in document with query\n\n self.ranking_rules = [\"rank:asc\", \"attribute\", \"typo\", \"words\", \"proximity\", \"exactness\"]\n\n self.updated_settings = {\n \"rankingRules\": self.ranking_rules,\n \"searchableAttributes\": self.searchable_attributes,\n \"displayedAttributes\": self.displayed_attributes\n }\n\n # Group results by page\n self.distinct_attribute = \"pageUrl\"\n\n def getConnectionString(self):\n \"\"\"\n Sets the Meilisearch host string, depending on the environment.\n\n Returns:\n string: Meilisearch host string.\n \"\"\"\n if os.environ.get('PORT'):\n return \"{}:{}\".format(self.default[\"host\"], os.environ['PORT'])\n else:\n return \"{}:{}\".format(self.default[\"host\"], self.default[\"port\"])\n\n def getMasterKey(self):\n \"\"\"\n Retrieves the Meilisearch master key, either from the Platform.sh environment or locally.\n \"\"\"\n config = Config()\n if config.is_valid_platform():\n return config.projectEntropy\n elif os.environ.get(\"MEILI_MASTER_KEY\"):\n return os.environ[\"MEILI_MASTER_KEY\"]\n else:\n return self.default[\"key\"]\n\n def add_documents(self, index):\n \"\"\"\n Cycle through the individual site indexes in /outputs so their individual documents can be added to Meilisearch.\n \"\"\"\n documents = [f for f in glob.glob(\"{}/*.json\".format(self.scrape_dir))]\n for doc in documents:\n self.add(doc, index)\n\n def add(self, doc, index):\n \"\"\"\n Add an individual site's index to the Meilisearch service.\n \"\"\"\n with open(doc) as scraped_index:\n data = json.load(scraped_index)\n index.add_documents(data)\n\n def update(self):\n \"\"\"\n Updates the Meilisearch index.\n \"\"\"\n # Create a Meilisearch client.\n client = meilisearch.Client(self.getConnectionString(), self.getMasterKey())\n\n # Delete previous index\n if len(client.get_indexes()):\n client.get_index(self.docs_index).delete()\n\n # Create a new index\n index = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name})\n\n # Add synonyms for the index\n index.update_synonyms(self.synonyms)\n\n # Update its settings: what can be searched, what's displayable, and how results should be ranked.\n index.update_settings(self.updated_settings)\n\n # Update distinct attribute.\n index.update_distinct_attribute(self.distinct_attribute)\n\n # Add documents to the index\n self.add_documents(index)\n\nif __name__ == \"__main__\":\n meili = Search()\n meili.update()\n", "path": "search/main.py"}], "after_files": [{"content": "import os\nimport glob\nimport json\nimport meilisearch\nfrom platformshconfig import Config\n\nclass Search:\n def __init__(self):\n self.default = {\n \"host\": \"http://127.0.0.1\",\n \"key\": None,\n \"port\": 7700\n }\n\n self.scrape_dir = \"output\"\n self.scrape_config = \"config/scrape.json\"\n self.docs_index = \"docs\"\n self.primaryKey = \"documentId\"\n self.index_name = \"Docs\"\n\n # Below are Platform.sh custom settings for how the search engine functions.\n\n # Data available to the dropdown React app in docs, used to fill out autocomplete results.\n self.displayed_attributes = ['keywords', 'title', 'text', 'url', 'site', 'section']\n # Data actually searchable by our queries.\n self.searchable_attributes = ['keywords', 'title', 'pageUrl', 'section', 'text', 'url']\n\n # Show results for one query with the listed pages, when they by default would not show up as best results.\n # 
Note: these aren't automatically two-way, which is why they're all defined twice.\n self.synonyms = {\n \"cron\": [\"crons\"],\n \"crons\": [\"cron tasks\", \"cron jobs\"],\n \"e-mail\": [\"email\"],\n \"routes.yaml\": [\"routes\"],\n \"routes\": [\"routes.yaml\"],\n \"services\": [\"services.yaml\"],\n \"services.yaml\": [\"services\"],\n \"application\": [\".platform.app.yaml\", \"app.yaml\", \"applications.yaml\"],\n \".platform.app.yaml\": [\"application\"],\n \"app.yaml\": [\"application\"],\n \"applications.yaml\": [\"application\", \"multi-app\"],\n \"multi-app\": [\"applications.yaml\"],\n \"regions\": [\"public ip addresses\"],\n \"public ip addresses\": [\"regions\"],\n \"ssl\": [\"https\", \"tls\"],\n \"https\": [\"ssl\"],\n }\n\n # Ranking rules:\n #\n # - Default order: [\"words\", \"typo\", \"proximity\", \"attribute\", \"sort\", \"exactness\"]\n #\n # - words: number of times query is in document (greater number gets priority)\n # - typo: fewer typos > more typos\n # - proximity: smaller distance between multiple occurences of query in same document > larger distances\n # - attribute: sorted according to order of importance of attributes (searchable_attributes). terms in\n # more important attributes first.\n # - sort: queries are sorted at query time\n # - exactness: similarity of matched words in document with query\n\n self.ranking_rules = [\"rank:asc\", \"attribute\", \"typo\", \"words\", \"proximity\", \"exactness\"]\n\n self.updated_settings = {\n \"rankingRules\": self.ranking_rules,\n \"searchableAttributes\": self.searchable_attributes,\n \"displayedAttributes\": self.displayed_attributes\n }\n\n # Group results by page\n self.distinct_attribute = \"pageUrl\"\n\n def getConnectionString(self):\n \"\"\"\n Sets the Meilisearch host string, depending on the environment.\n\n Returns:\n string: Meilisearch host string.\n \"\"\"\n if os.environ.get('PORT'):\n return \"{}:{}\".format(self.default[\"host\"], os.environ['PORT'])\n else:\n return \"{}:{}\".format(self.default[\"host\"], self.default[\"port\"])\n\n def getMasterKey(self):\n \"\"\"\n Retrieves the Meilisearch master key, either from the Platform.sh environment or locally.\n \"\"\"\n config = Config()\n if config.is_valid_platform():\n return config.projectEntropy\n elif os.environ.get(\"MEILI_MASTER_KEY\"):\n return os.environ[\"MEILI_MASTER_KEY\"]\n else:\n return self.default[\"key\"]\n\n def add_documents(self, index):\n \"\"\"\n Cycle through the individual site indexes in /outputs so their individual documents can be added to Meilisearch.\n \"\"\"\n documents = [f for f in glob.glob(\"{}/*.json\".format(self.scrape_dir))]\n for doc in documents:\n self.add(doc, index)\n\n def add(self, doc, index):\n \"\"\"\n Add an individual site's index to the Meilisearch service.\n \"\"\"\n with open(doc) as scraped_index:\n data = json.load(scraped_index)\n index.add_documents(data)\n\n def update(self):\n \"\"\"\n Updates the Meilisearch index.\n \"\"\"\n # Create a Meilisearch client.\n client = meilisearch.Client(self.getConnectionString(), self.getMasterKey())\n\n # Delete previous index\n if len(client.get_indexes()):\n client.get_index(self.docs_index).delete()\n\n # Create a new index\n index = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name})\n\n # Add synonyms for the index\n index.update_synonyms(self.synonyms)\n\n # Update its settings: what can be searched, what's displayable, and how results should be ranked.\n 
index.update_settings(self.updated_settings)\n\n # Update distinct attribute.\n index.update_distinct_attribute(self.distinct_attribute)\n\n # Add documents to the index\n self.add_documents(index)\n\nif __name__ == \"__main__\":\n meili = Search()\n meili.update()\n", "path": "search/main.py"}]}
| 1,808 | 221 |
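The fix above is small but easy to misread: `keywords` is added at the front of both attribute lists so that page-level keywords such as `.platform.app.yaml` are indexed, shown in autocomplete results, and ranked ahead of body text by the `attribute` ranking rule. A minimal sketch of the resulting settings payload, mirroring the patched `search/main.py` (it assumes the scraper emits a `keywords` field for each document):

```python
# Attribute lists after the patch: 'keywords' leads both so the `attribute`
# ranking rule prefers keyword hits over matches in page text.
displayed_attributes = ["keywords", "title", "text", "url", "site", "section"]
searchable_attributes = ["keywords", "title", "pageUrl", "section", "text", "url"]

updated_settings = {
    "rankingRules": ["rank:asc", "attribute", "typo", "words", "proximity", "exactness"],
    "searchableAttributes": searchable_attributes,
    "displayedAttributes": displayed_attributes,
}

# Search.update() then pushes this dict with index.update_settings(updated_settings),
# exactly as the existing code already does for the old attribute lists.
print(updated_settings)
```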
gh_patches_debug_37279
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-12359
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: Missing "app id" in app extension token
### What are you trying to achieve?
The app (via app-sdk) checks the `app` field in the user JWT token (sent from the dashboard via postMessage to AppBridge) and compares it to the registered app ID that the app persists. If the app ID is different, the app rejects the call.
However, there are also extensions that have their own tokens (they can have different permissions). **These tokens lack the `app` field**.
From the app's point of view, an extension is part of the app, so its token is expected to carry the app ID as well. Otherwise, the app would have to persist all registered extensions to keep working, which is quite hard to implement.
### Steps to reproduce the problem
## App token payload
```json
{
"iat": 1679048767,
"owner": "saleor",
"iss": "https://automation-dashboard.staging.saleor.cloud/graphql/",
"exp": 1679135167,
"token": "M2irFmzVASR3",
"email": "[email protected]",
"type": "thirdparty",
"user_id": "VXNlcjoxMDMz",
"is_staff": true,
"app": "QXBwOjY0",
"permissions": [
"MANAGE_PRODUCTS"
],
"user_permissions": [
"MANAGE_GIFT_CARD",
"MANAGE_MENUS",
"MANAGE_PAGES",
"MANAGE_PAGE_TYPES_AND_ATTRIBUTES",
"MANAGE_PLUGINS",
"MANAGE_TAXES",
"MANAGE_USERS",
"MANAGE_CHECKOUTS",
"MANAGE_PRODUCT_TYPES_AND_ATTRIBUTES",
"MANAGE_TRANSLATIONS",
"MANAGE_APPS",
"MANAGE_OBSERVABILITY",
"MANAGE_STAFF",
"HANDLE_TAXES",
"MANAGE_CHANNELS",
"HANDLE_CHECKOUTS",
"MANAGE_SETTINGS",
"HANDLE_PAYMENTS",
"MANAGE_ORDERS",
"MANAGE_PRODUCTS",
"MANAGE_SHIPPING",
"MANAGE_DISCOUNTS",
"IMPERSONATE_USER"
]
}
```
## Extension token payload
```json
"owner": "saleor",
"iss": "https://automation-dashboard.staging.saleor.cloud/graphql/",
"exp": 1679137338,
"token": "M2irFmzVASR3",
"email": "[email protected]",
"type": "thirdparty",
"user_id": "VXNlcjoxMDMz",
"is_staff": true,
"app_extension": "QXBwRXh0ZW5zaW9uOjQy",
"permissions": [],
"user_permissions": [
"MANAGE_GIFT_CARD",
"MANAGE_MENUS",
"MANAGE_PAGES",
"MANAGE_PAGE_TYPES_AND_ATTRIBUTES",
"MANAGE_PLUGINS",
"MANAGE_TAXES",
"MANAGE_USERS",
"MANAGE_CHECKOUTS",
"MANAGE_PRODUCT_TYPES_AND_ATTRIBUTES",
"MANAGE_TRANSLATIONS",
"MANAGE_APPS",
"MANAGE_OBSERVABILITY",
"MANAGE_STAFF",
"HANDLE_TAXES",
"MANAGE_CHANNELS",
"HANDLE_CHECKOUTS",
"MANAGE_SETTINGS",
"HANDLE_PAYMENTS",
"MANAGE_ORDERS",
"MANAGE_PRODUCTS",
"MANAGE_SHIPPING",
"MANAGE_DISCOUNTS",
"IMPERSONATE_USER"
]
}
```
### What did you expect to happen?
Extension token should contain `app` field
### Logs
_No response_
### Environment
Saleor version: …
OS and version: …
--- END ISSUE ---
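The rejection described in the issue comes from an app-side comparison along the lines of the sketch below, which assumes PyJWT and a hypothetical persisted app ID rather than actual app-sdk code. Because the extension token carries only `app_extension`, the `app` lookup returns `None` and the check fails even though the extension belongs to the app:

```python
import jwt  # PyJWT, the same library Saleor itself uses for token handling

SAVED_APP_ID = "QXBwOjY0"  # hypothetical ID the app persisted at registration

def verify_dashboard_token(token: str) -> dict:
    # Signature verification is skipped to keep the sketch self-contained;
    # a real app would first verify the signature against Saleor's JWKS.
    payload = jwt.decode(token, options={"verify_signature": False})
    if payload.get("app") != SAVED_APP_ID:
        # Extension tokens currently omit "app", so they land here and the
        # call is rejected unless the app also tracks every extension ID.
        raise PermissionError("token was not issued for this app")
    return payload
```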
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/core/jwt.py`
Content:
```
1 from datetime import datetime, timedelta
2 from typing import Any, Dict, Iterable, Optional
3
4 import graphene
5 import jwt
6 from django.conf import settings
7
8 from ..account.models import User
9 from ..app.models import App, AppExtension
10 from ..permission.enums import (
11 get_permission_names,
12 get_permissions_from_codenames,
13 get_permissions_from_names,
14 )
15 from ..permission.models import Permission
16 from .jwt_manager import get_jwt_manager
17
18 JWT_ACCESS_TYPE = "access"
19 JWT_REFRESH_TYPE = "refresh"
20 JWT_THIRDPARTY_ACCESS_TYPE = "thirdparty"
21 JWT_REFRESH_TOKEN_COOKIE_NAME = "refreshToken"
22
23 PERMISSIONS_FIELD = "permissions"
24 USER_PERMISSION_FIELD = "user_permissions"
25 JWT_SALEOR_OWNER_NAME = "saleor"
26 JWT_OWNER_FIELD = "owner"
27
28
29 def jwt_base_payload(
30 exp_delta: Optional[timedelta], token_owner: str
31 ) -> Dict[str, Any]:
32 utc_now = datetime.utcnow()
33
34 payload = {
35 "iat": utc_now,
36 JWT_OWNER_FIELD: token_owner,
37 "iss": get_jwt_manager().get_issuer(),
38 }
39 if exp_delta:
40 payload["exp"] = utc_now + exp_delta
41 return payload
42
43
44 def jwt_user_payload(
45 user: User,
46 token_type: str,
47 exp_delta: Optional[timedelta],
48 additional_payload: Optional[Dict[str, Any]] = None,
49 token_owner: str = JWT_SALEOR_OWNER_NAME,
50 ) -> Dict[str, Any]:
51 payload = jwt_base_payload(exp_delta, token_owner)
52 payload.update(
53 {
54 "token": user.jwt_token_key,
55 "email": user.email,
56 "type": token_type,
57 "user_id": graphene.Node.to_global_id("User", user.id),
58 "is_staff": user.is_staff,
59 }
60 )
61 if additional_payload:
62 payload.update(additional_payload)
63 return payload
64
65
66 def jwt_encode(payload: Dict[str, Any]) -> str:
67 jwt_manager = get_jwt_manager()
68 return jwt_manager.encode(payload)
69
70
71 def jwt_decode_with_exception_handler(
72 token: str, verify_expiration=settings.JWT_EXPIRE
73 ) -> Optional[Dict[str, Any]]:
74 try:
75 return jwt_decode(token, verify_expiration=verify_expiration)
76 except jwt.PyJWTError:
77 return None
78
79
80 def jwt_decode(
81 token: str, verify_expiration=settings.JWT_EXPIRE, verify_aud: bool = False
82 ) -> Dict[str, Any]:
83 jwt_manager = get_jwt_manager()
84 return jwt_manager.decode(token, verify_expiration, verify_aud=verify_aud)
85
86
87 def create_token(payload: Dict[str, Any], exp_delta: timedelta) -> str:
88 payload.update(jwt_base_payload(exp_delta, token_owner=JWT_SALEOR_OWNER_NAME))
89 return jwt_encode(payload)
90
91
92 def create_access_token(
93 user: User, additional_payload: Optional[Dict[str, Any]] = None
94 ) -> str:
95 payload = jwt_user_payload(
96 user, JWT_ACCESS_TYPE, settings.JWT_TTL_ACCESS, additional_payload
97 )
98 return jwt_encode(payload)
99
100
101 def create_refresh_token(
102 user: User, additional_payload: Optional[Dict[str, Any]] = None
103 ) -> str:
104 payload = jwt_user_payload(
105 user,
106 JWT_REFRESH_TYPE,
107 settings.JWT_TTL_REFRESH,
108 additional_payload,
109 )
110 return jwt_encode(payload)
111
112
113 def get_user_from_payload(payload: Dict[str, Any], request=None) -> Optional[User]:
114 # TODO: dataloader
115 user = User.objects.filter(email=payload["email"], is_active=True).first()
116 user_jwt_token = payload.get("token")
117 if not user_jwt_token or not user:
118 raise jwt.InvalidTokenError(
119 "Invalid token. Create new one by using tokenCreate mutation."
120 )
121 if user.jwt_token_key != user_jwt_token:
122 raise jwt.InvalidTokenError(
123 "Invalid token. Create new one by using tokenCreate mutation."
124 )
125 return user
126
127
128 def is_saleor_token(token: str) -> bool:
129 """Confirm that token was generated by Saleor not by plugin."""
130 try:
131 payload = jwt.decode(token, options={"verify_signature": False})
132 except jwt.PyJWTError:
133 return False
134 owner = payload.get(JWT_OWNER_FIELD)
135 if not owner or owner != JWT_SALEOR_OWNER_NAME:
136 return False
137 return True
138
139
140 def get_user_from_access_payload(payload: dict, request=None) -> Optional[User]:
141 jwt_type = payload.get("type")
142 if jwt_type not in [JWT_ACCESS_TYPE, JWT_THIRDPARTY_ACCESS_TYPE]:
143 raise jwt.InvalidTokenError(
144 "Invalid token. Create new one by using tokenCreate mutation."
145 )
146 permissions = payload.get(PERMISSIONS_FIELD, None)
147 user = get_user_from_payload(payload, request)
148 if user:
149 if permissions is not None:
150 token_permissions = get_permissions_from_names(permissions)
151 token_codenames = [perm.codename for perm in token_permissions]
152 user.effective_permissions = get_permissions_from_codenames(token_codenames)
153 user.is_staff = True if user.effective_permissions else False
154
155 if payload.get("is_staff"):
156 user.is_staff = True
157 return user
158
159
160 def _create_access_token_for_third_party_actions(
161 permissions: Iterable["Permission"],
162 user: "User",
163 type: str,
164 object_id: int,
165 object_payload_key: str,
166 audience: Optional[str],
167 ):
168 app_permission_enums = get_permission_names(permissions)
169
170 permissions = user.effective_permissions
171 user_permission_enums = get_permission_names(permissions)
172 additional_payload = {
173 object_payload_key: graphene.Node.to_global_id(type, object_id),
174 PERMISSIONS_FIELD: list(app_permission_enums & user_permission_enums),
175 USER_PERMISSION_FIELD: list(user_permission_enums),
176 }
177 if audience:
178 additional_payload["aud"] = audience
179
180 payload = jwt_user_payload(
181 user,
182 JWT_THIRDPARTY_ACCESS_TYPE,
183 exp_delta=settings.JWT_TTL_APP_ACCESS,
184 additional_payload=additional_payload,
185 )
186 return jwt_encode(payload)
187
188
189 def create_access_token_for_app(app: "App", user: "User"):
190 """Create access token for app.
191
192 App can use user's JWT token to proceed given operation in Saleor.
193 The token which can be used by App has additional field defining the permissions
194 assigned to it. The permissions set is the intersection of user permissions and
195 app permissions.
196 """
197 app_permissions = app.permissions.all()
198 return _create_access_token_for_third_party_actions(
199 permissions=app_permissions,
200 user=user,
201 type="App",
202 object_id=app.id,
203 object_payload_key="app",
204 audience=app.audience,
205 )
206
207
208 def create_access_token_for_app_extension(
209 app_extension: "AppExtension",
210 permissions: Iterable["Permission"],
211 user: "User",
212 app: "App",
213 ):
214 return _create_access_token_for_third_party_actions(
215 permissions=permissions,
216 user=user,
217 type="AppExtension",
218 object_id=app_extension.id,
219 object_payload_key="app_extension",
220 audience=app.audience,
221 )
222
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/saleor/core/jwt.py b/saleor/core/jwt.py
--- a/saleor/core/jwt.py
+++ b/saleor/core/jwt.py
@@ -20,6 +20,7 @@
JWT_THIRDPARTY_ACCESS_TYPE = "thirdparty"
JWT_REFRESH_TOKEN_COOKIE_NAME = "refreshToken"
+APP_KEY_FIELD = "app"
PERMISSIONS_FIELD = "permissions"
USER_PERMISSION_FIELD = "user_permissions"
JWT_SALEOR_OWNER_NAME = "saleor"
@@ -160,22 +161,22 @@
def _create_access_token_for_third_party_actions(
permissions: Iterable["Permission"],
user: "User",
- type: str,
- object_id: int,
- object_payload_key: str,
- audience: Optional[str],
+ app: "App",
+ extra: Optional[Dict[str, Any]] = None,
):
app_permission_enums = get_permission_names(permissions)
permissions = user.effective_permissions
user_permission_enums = get_permission_names(permissions)
additional_payload = {
- object_payload_key: graphene.Node.to_global_id(type, object_id),
+ APP_KEY_FIELD: graphene.Node.to_global_id("App", app.id),
PERMISSIONS_FIELD: list(app_permission_enums & user_permission_enums),
USER_PERMISSION_FIELD: list(user_permission_enums),
}
- if audience:
- additional_payload["aud"] = audience
+ if app.audience:
+ additional_payload["aud"] = app.audience
+ if extra:
+ additional_payload.update(extra)
payload = jwt_user_payload(
user,
@@ -196,12 +197,7 @@
"""
app_permissions = app.permissions.all()
return _create_access_token_for_third_party_actions(
- permissions=app_permissions,
- user=user,
- type="App",
- object_id=app.id,
- object_payload_key="app",
- audience=app.audience,
+ permissions=app_permissions, user=user, app=app
)
@@ -211,11 +207,10 @@
user: "User",
app: "App",
):
+ app_extension_id = graphene.Node.to_global_id("AppExtension", app_extension.id)
return _create_access_token_for_third_party_actions(
permissions=permissions,
user=user,
- type="AppExtension",
- object_id=app_extension.id,
- object_payload_key="app_extension",
- audience=app.audience,
+ app=app,
+ extra={"app_extension": app_extension_id},
)
|
{"golden_diff": "diff --git a/saleor/core/jwt.py b/saleor/core/jwt.py\n--- a/saleor/core/jwt.py\n+++ b/saleor/core/jwt.py\n@@ -20,6 +20,7 @@\n JWT_THIRDPARTY_ACCESS_TYPE = \"thirdparty\"\n JWT_REFRESH_TOKEN_COOKIE_NAME = \"refreshToken\"\n \n+APP_KEY_FIELD = \"app\"\n PERMISSIONS_FIELD = \"permissions\"\n USER_PERMISSION_FIELD = \"user_permissions\"\n JWT_SALEOR_OWNER_NAME = \"saleor\"\n@@ -160,22 +161,22 @@\n def _create_access_token_for_third_party_actions(\n permissions: Iterable[\"Permission\"],\n user: \"User\",\n- type: str,\n- object_id: int,\n- object_payload_key: str,\n- audience: Optional[str],\n+ app: \"App\",\n+ extra: Optional[Dict[str, Any]] = None,\n ):\n app_permission_enums = get_permission_names(permissions)\n \n permissions = user.effective_permissions\n user_permission_enums = get_permission_names(permissions)\n additional_payload = {\n- object_payload_key: graphene.Node.to_global_id(type, object_id),\n+ APP_KEY_FIELD: graphene.Node.to_global_id(\"App\", app.id),\n PERMISSIONS_FIELD: list(app_permission_enums & user_permission_enums),\n USER_PERMISSION_FIELD: list(user_permission_enums),\n }\n- if audience:\n- additional_payload[\"aud\"] = audience\n+ if app.audience:\n+ additional_payload[\"aud\"] = app.audience\n+ if extra:\n+ additional_payload.update(extra)\n \n payload = jwt_user_payload(\n user,\n@@ -196,12 +197,7 @@\n \"\"\"\n app_permissions = app.permissions.all()\n return _create_access_token_for_third_party_actions(\n- permissions=app_permissions,\n- user=user,\n- type=\"App\",\n- object_id=app.id,\n- object_payload_key=\"app\",\n- audience=app.audience,\n+ permissions=app_permissions, user=user, app=app\n )\n \n \n@@ -211,11 +207,10 @@\n user: \"User\",\n app: \"App\",\n ):\n+ app_extension_id = graphene.Node.to_global_id(\"AppExtension\", app_extension.id)\n return _create_access_token_for_third_party_actions(\n permissions=permissions,\n user=user,\n- type=\"AppExtension\",\n- object_id=app_extension.id,\n- object_payload_key=\"app_extension\",\n- audience=app.audience,\n+ app=app,\n+ extra={\"app_extension\": app_extension_id},\n )\n", "issue": "Bug: Missing \"app id\" in app extension token\n### What are you trying to achieve?\r\n\r\nApp (via app-sdk) is checking `app` field in user JWT token (sent from dashboard, via postmessage to appbridge) and compares it to registered app ID (that app persists). If app ID is different, app is rejecting the call.\r\n\r\nHowever, there are also extensions that have their own token (they can have different permissions). **This token lacks `app` field**. \r\n\r\nFor app, extension is its part, so its expected to have app ID in the token as well. 
Otherwise, app would require to persist all registered extensions to work, which is quite hard to implement.\r\n\r\n### Steps to reproduce the problem\r\n\r\n## App token payload\r\n\r\n```json\r\n{\r\n \"iat\": 1679048767,\r\n \"owner\": \"saleor\",\r\n \"iss\": \"https://automation-dashboard.staging.saleor.cloud/graphql/\",\r\n \"exp\": 1679135167,\r\n \"token\": \"M2irFmzVASR3\",\r\n \"email\": \"[email protected]\",\r\n \"type\": \"thirdparty\",\r\n \"user_id\": \"VXNlcjoxMDMz\",\r\n \"is_staff\": true,\r\n \"app\": \"QXBwOjY0\",\r\n \"permissions\": [\r\n \"MANAGE_PRODUCTS\"\r\n ],\r\n \"user_permissions\": [\r\n \"MANAGE_GIFT_CARD\",\r\n \"MANAGE_MENUS\",\r\n \"MANAGE_PAGES\",\r\n \"MANAGE_PAGE_TYPES_AND_ATTRIBUTES\",\r\n \"MANAGE_PLUGINS\",\r\n \"MANAGE_TAXES\",\r\n \"MANAGE_USERS\",\r\n \"MANAGE_CHECKOUTS\",\r\n \"MANAGE_PRODUCT_TYPES_AND_ATTRIBUTES\",\r\n \"MANAGE_TRANSLATIONS\",\r\n \"MANAGE_APPS\",\r\n \"MANAGE_OBSERVABILITY\",\r\n \"MANAGE_STAFF\",\r\n \"HANDLE_TAXES\",\r\n \"MANAGE_CHANNELS\",\r\n \"HANDLE_CHECKOUTS\",\r\n \"MANAGE_SETTINGS\",\r\n \"HANDLE_PAYMENTS\",\r\n \"MANAGE_ORDERS\",\r\n \"MANAGE_PRODUCTS\",\r\n \"MANAGE_SHIPPING\",\r\n \"MANAGE_DISCOUNTS\",\r\n \"IMPERSONATE_USER\"\r\n ]\r\n}\r\n```\r\n\r\n\r\n## Extension token payload\r\n\r\n```json\r\n \"owner\": \"saleor\",\r\n \"iss\": \"https://automation-dashboard.staging.saleor.cloud/graphql/\",\r\n \"exp\": 1679137338,\r\n \"token\": \"M2irFmzVASR3\",\r\n \"email\": \"[email protected]\",\r\n \"type\": \"thirdparty\",\r\n \"user_id\": \"VXNlcjoxMDMz\",\r\n \"is_staff\": true,\r\n \"app_extension\": \"QXBwRXh0ZW5zaW9uOjQy\",\r\n \"permissions\": [],\r\n \"user_permissions\": [\r\n \"MANAGE_GIFT_CARD\",\r\n \"MANAGE_MENUS\",\r\n \"MANAGE_PAGES\",\r\n \"MANAGE_PAGE_TYPES_AND_ATTRIBUTES\",\r\n \"MANAGE_PLUGINS\",\r\n \"MANAGE_TAXES\",\r\n \"MANAGE_USERS\",\r\n \"MANAGE_CHECKOUTS\",\r\n \"MANAGE_PRODUCT_TYPES_AND_ATTRIBUTES\",\r\n \"MANAGE_TRANSLATIONS\",\r\n \"MANAGE_APPS\",\r\n \"MANAGE_OBSERVABILITY\",\r\n \"MANAGE_STAFF\",\r\n \"HANDLE_TAXES\",\r\n \"MANAGE_CHANNELS\",\r\n \"HANDLE_CHECKOUTS\",\r\n \"MANAGE_SETTINGS\",\r\n \"HANDLE_PAYMENTS\",\r\n \"MANAGE_ORDERS\",\r\n \"MANAGE_PRODUCTS\",\r\n \"MANAGE_SHIPPING\",\r\n \"MANAGE_DISCOUNTS\",\r\n \"IMPERSONATE_USER\"\r\n ]\r\n}\r\n```\r\n\r\n### What did you expect to happen?\r\n\r\nExtension token should contain `app` field\r\n\r\n### Logs\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\nSaleor version: \u2026\r\nOS and version: \u2026\r\n\n", "before_files": [{"content": "from datetime import datetime, timedelta\nfrom typing import Any, Dict, Iterable, Optional\n\nimport graphene\nimport jwt\nfrom django.conf import settings\n\nfrom ..account.models import User\nfrom ..app.models import App, AppExtension\nfrom ..permission.enums import (\n get_permission_names,\n get_permissions_from_codenames,\n get_permissions_from_names,\n)\nfrom ..permission.models import Permission\nfrom .jwt_manager import get_jwt_manager\n\nJWT_ACCESS_TYPE = \"access\"\nJWT_REFRESH_TYPE = \"refresh\"\nJWT_THIRDPARTY_ACCESS_TYPE = \"thirdparty\"\nJWT_REFRESH_TOKEN_COOKIE_NAME = \"refreshToken\"\n\nPERMISSIONS_FIELD = \"permissions\"\nUSER_PERMISSION_FIELD = \"user_permissions\"\nJWT_SALEOR_OWNER_NAME = \"saleor\"\nJWT_OWNER_FIELD = \"owner\"\n\n\ndef jwt_base_payload(\n exp_delta: Optional[timedelta], token_owner: str\n) -> Dict[str, Any]:\n utc_now = datetime.utcnow()\n\n payload = {\n \"iat\": utc_now,\n JWT_OWNER_FIELD: token_owner,\n \"iss\": get_jwt_manager().get_issuer(),\n }\n if 
exp_delta:\n payload[\"exp\"] = utc_now + exp_delta\n return payload\n\n\ndef jwt_user_payload(\n user: User,\n token_type: str,\n exp_delta: Optional[timedelta],\n additional_payload: Optional[Dict[str, Any]] = None,\n token_owner: str = JWT_SALEOR_OWNER_NAME,\n) -> Dict[str, Any]:\n payload = jwt_base_payload(exp_delta, token_owner)\n payload.update(\n {\n \"token\": user.jwt_token_key,\n \"email\": user.email,\n \"type\": token_type,\n \"user_id\": graphene.Node.to_global_id(\"User\", user.id),\n \"is_staff\": user.is_staff,\n }\n )\n if additional_payload:\n payload.update(additional_payload)\n return payload\n\n\ndef jwt_encode(payload: Dict[str, Any]) -> str:\n jwt_manager = get_jwt_manager()\n return jwt_manager.encode(payload)\n\n\ndef jwt_decode_with_exception_handler(\n token: str, verify_expiration=settings.JWT_EXPIRE\n) -> Optional[Dict[str, Any]]:\n try:\n return jwt_decode(token, verify_expiration=verify_expiration)\n except jwt.PyJWTError:\n return None\n\n\ndef jwt_decode(\n token: str, verify_expiration=settings.JWT_EXPIRE, verify_aud: bool = False\n) -> Dict[str, Any]:\n jwt_manager = get_jwt_manager()\n return jwt_manager.decode(token, verify_expiration, verify_aud=verify_aud)\n\n\ndef create_token(payload: Dict[str, Any], exp_delta: timedelta) -> str:\n payload.update(jwt_base_payload(exp_delta, token_owner=JWT_SALEOR_OWNER_NAME))\n return jwt_encode(payload)\n\n\ndef create_access_token(\n user: User, additional_payload: Optional[Dict[str, Any]] = None\n) -> str:\n payload = jwt_user_payload(\n user, JWT_ACCESS_TYPE, settings.JWT_TTL_ACCESS, additional_payload\n )\n return jwt_encode(payload)\n\n\ndef create_refresh_token(\n user: User, additional_payload: Optional[Dict[str, Any]] = None\n) -> str:\n payload = jwt_user_payload(\n user,\n JWT_REFRESH_TYPE,\n settings.JWT_TTL_REFRESH,\n additional_payload,\n )\n return jwt_encode(payload)\n\n\ndef get_user_from_payload(payload: Dict[str, Any], request=None) -> Optional[User]:\n # TODO: dataloader\n user = User.objects.filter(email=payload[\"email\"], is_active=True).first()\n user_jwt_token = payload.get(\"token\")\n if not user_jwt_token or not user:\n raise jwt.InvalidTokenError(\n \"Invalid token. Create new one by using tokenCreate mutation.\"\n )\n if user.jwt_token_key != user_jwt_token:\n raise jwt.InvalidTokenError(\n \"Invalid token. Create new one by using tokenCreate mutation.\"\n )\n return user\n\n\ndef is_saleor_token(token: str) -> bool:\n \"\"\"Confirm that token was generated by Saleor not by plugin.\"\"\"\n try:\n payload = jwt.decode(token, options={\"verify_signature\": False})\n except jwt.PyJWTError:\n return False\n owner = payload.get(JWT_OWNER_FIELD)\n if not owner or owner != JWT_SALEOR_OWNER_NAME:\n return False\n return True\n\n\ndef get_user_from_access_payload(payload: dict, request=None) -> Optional[User]:\n jwt_type = payload.get(\"type\")\n if jwt_type not in [JWT_ACCESS_TYPE, JWT_THIRDPARTY_ACCESS_TYPE]:\n raise jwt.InvalidTokenError(\n \"Invalid token. 
Create new one by using tokenCreate mutation.\"\n )\n permissions = payload.get(PERMISSIONS_FIELD, None)\n user = get_user_from_payload(payload, request)\n if user:\n if permissions is not None:\n token_permissions = get_permissions_from_names(permissions)\n token_codenames = [perm.codename for perm in token_permissions]\n user.effective_permissions = get_permissions_from_codenames(token_codenames)\n user.is_staff = True if user.effective_permissions else False\n\n if payload.get(\"is_staff\"):\n user.is_staff = True\n return user\n\n\ndef _create_access_token_for_third_party_actions(\n permissions: Iterable[\"Permission\"],\n user: \"User\",\n type: str,\n object_id: int,\n object_payload_key: str,\n audience: Optional[str],\n):\n app_permission_enums = get_permission_names(permissions)\n\n permissions = user.effective_permissions\n user_permission_enums = get_permission_names(permissions)\n additional_payload = {\n object_payload_key: graphene.Node.to_global_id(type, object_id),\n PERMISSIONS_FIELD: list(app_permission_enums & user_permission_enums),\n USER_PERMISSION_FIELD: list(user_permission_enums),\n }\n if audience:\n additional_payload[\"aud\"] = audience\n\n payload = jwt_user_payload(\n user,\n JWT_THIRDPARTY_ACCESS_TYPE,\n exp_delta=settings.JWT_TTL_APP_ACCESS,\n additional_payload=additional_payload,\n )\n return jwt_encode(payload)\n\n\ndef create_access_token_for_app(app: \"App\", user: \"User\"):\n \"\"\"Create access token for app.\n\n App can use user's JWT token to proceed given operation in Saleor.\n The token which can be used by App has additional field defining the permissions\n assigned to it. The permissions set is the intersection of user permissions and\n app permissions.\n \"\"\"\n app_permissions = app.permissions.all()\n return _create_access_token_for_third_party_actions(\n permissions=app_permissions,\n user=user,\n type=\"App\",\n object_id=app.id,\n object_payload_key=\"app\",\n audience=app.audience,\n )\n\n\ndef create_access_token_for_app_extension(\n app_extension: \"AppExtension\",\n permissions: Iterable[\"Permission\"],\n user: \"User\",\n app: \"App\",\n):\n return _create_access_token_for_third_party_actions(\n permissions=permissions,\n user=user,\n type=\"AppExtension\",\n object_id=app_extension.id,\n object_payload_key=\"app_extension\",\n audience=app.audience,\n )\n", "path": "saleor/core/jwt.py"}], "after_files": [{"content": "from datetime import datetime, timedelta\nfrom typing import Any, Dict, Iterable, Optional\n\nimport graphene\nimport jwt\nfrom django.conf import settings\n\nfrom ..account.models import User\nfrom ..app.models import App, AppExtension\nfrom ..permission.enums import (\n get_permission_names,\n get_permissions_from_codenames,\n get_permissions_from_names,\n)\nfrom ..permission.models import Permission\nfrom .jwt_manager import get_jwt_manager\n\nJWT_ACCESS_TYPE = \"access\"\nJWT_REFRESH_TYPE = \"refresh\"\nJWT_THIRDPARTY_ACCESS_TYPE = \"thirdparty\"\nJWT_REFRESH_TOKEN_COOKIE_NAME = \"refreshToken\"\n\nAPP_KEY_FIELD = \"app\"\nPERMISSIONS_FIELD = \"permissions\"\nUSER_PERMISSION_FIELD = \"user_permissions\"\nJWT_SALEOR_OWNER_NAME = \"saleor\"\nJWT_OWNER_FIELD = \"owner\"\n\n\ndef jwt_base_payload(\n exp_delta: Optional[timedelta], token_owner: str\n) -> Dict[str, Any]:\n utc_now = datetime.utcnow()\n\n payload = {\n \"iat\": utc_now,\n JWT_OWNER_FIELD: token_owner,\n \"iss\": get_jwt_manager().get_issuer(),\n }\n if exp_delta:\n payload[\"exp\"] = utc_now + exp_delta\n return payload\n\n\ndef jwt_user_payload(\n user: 
User,\n token_type: str,\n exp_delta: Optional[timedelta],\n additional_payload: Optional[Dict[str, Any]] = None,\n token_owner: str = JWT_SALEOR_OWNER_NAME,\n) -> Dict[str, Any]:\n payload = jwt_base_payload(exp_delta, token_owner)\n payload.update(\n {\n \"token\": user.jwt_token_key,\n \"email\": user.email,\n \"type\": token_type,\n \"user_id\": graphene.Node.to_global_id(\"User\", user.id),\n \"is_staff\": user.is_staff,\n }\n )\n if additional_payload:\n payload.update(additional_payload)\n return payload\n\n\ndef jwt_encode(payload: Dict[str, Any]) -> str:\n jwt_manager = get_jwt_manager()\n return jwt_manager.encode(payload)\n\n\ndef jwt_decode_with_exception_handler(\n token: str, verify_expiration=settings.JWT_EXPIRE\n) -> Optional[Dict[str, Any]]:\n try:\n return jwt_decode(token, verify_expiration=verify_expiration)\n except jwt.PyJWTError:\n return None\n\n\ndef jwt_decode(\n token: str, verify_expiration=settings.JWT_EXPIRE, verify_aud: bool = False\n) -> Dict[str, Any]:\n jwt_manager = get_jwt_manager()\n return jwt_manager.decode(token, verify_expiration, verify_aud=verify_aud)\n\n\ndef create_token(payload: Dict[str, Any], exp_delta: timedelta) -> str:\n payload.update(jwt_base_payload(exp_delta, token_owner=JWT_SALEOR_OWNER_NAME))\n return jwt_encode(payload)\n\n\ndef create_access_token(\n user: User, additional_payload: Optional[Dict[str, Any]] = None\n) -> str:\n payload = jwt_user_payload(\n user, JWT_ACCESS_TYPE, settings.JWT_TTL_ACCESS, additional_payload\n )\n return jwt_encode(payload)\n\n\ndef create_refresh_token(\n user: User, additional_payload: Optional[Dict[str, Any]] = None\n) -> str:\n payload = jwt_user_payload(\n user,\n JWT_REFRESH_TYPE,\n settings.JWT_TTL_REFRESH,\n additional_payload,\n )\n return jwt_encode(payload)\n\n\ndef get_user_from_payload(payload: Dict[str, Any], request=None) -> Optional[User]:\n # TODO: dataloader\n user = User.objects.filter(email=payload[\"email\"], is_active=True).first()\n user_jwt_token = payload.get(\"token\")\n if not user_jwt_token or not user:\n raise jwt.InvalidTokenError(\n \"Invalid token. Create new one by using tokenCreate mutation.\"\n )\n if user.jwt_token_key != user_jwt_token:\n raise jwt.InvalidTokenError(\n \"Invalid token. Create new one by using tokenCreate mutation.\"\n )\n return user\n\n\ndef is_saleor_token(token: str) -> bool:\n \"\"\"Confirm that token was generated by Saleor not by plugin.\"\"\"\n try:\n payload = jwt.decode(token, options={\"verify_signature\": False})\n except jwt.PyJWTError:\n return False\n owner = payload.get(JWT_OWNER_FIELD)\n if not owner or owner != JWT_SALEOR_OWNER_NAME:\n return False\n return True\n\n\ndef get_user_from_access_payload(payload: dict, request=None) -> Optional[User]:\n jwt_type = payload.get(\"type\")\n if jwt_type not in [JWT_ACCESS_TYPE, JWT_THIRDPARTY_ACCESS_TYPE]:\n raise jwt.InvalidTokenError(\n \"Invalid token. 
Create new one by using tokenCreate mutation.\"\n )\n permissions = payload.get(PERMISSIONS_FIELD, None)\n user = get_user_from_payload(payload, request)\n if user:\n if permissions is not None:\n token_permissions = get_permissions_from_names(permissions)\n token_codenames = [perm.codename for perm in token_permissions]\n user.effective_permissions = get_permissions_from_codenames(token_codenames)\n user.is_staff = True if user.effective_permissions else False\n\n if payload.get(\"is_staff\"):\n user.is_staff = True\n return user\n\n\ndef _create_access_token_for_third_party_actions(\n permissions: Iterable[\"Permission\"],\n user: \"User\",\n app: \"App\",\n extra: Optional[Dict[str, Any]] = None,\n):\n app_permission_enums = get_permission_names(permissions)\n\n permissions = user.effective_permissions\n user_permission_enums = get_permission_names(permissions)\n additional_payload = {\n APP_KEY_FIELD: graphene.Node.to_global_id(\"App\", app.id),\n PERMISSIONS_FIELD: list(app_permission_enums & user_permission_enums),\n USER_PERMISSION_FIELD: list(user_permission_enums),\n }\n if app.audience:\n additional_payload[\"aud\"] = app.audience\n if extra:\n additional_payload.update(extra)\n\n payload = jwt_user_payload(\n user,\n JWT_THIRDPARTY_ACCESS_TYPE,\n exp_delta=settings.JWT_TTL_APP_ACCESS,\n additional_payload=additional_payload,\n )\n return jwt_encode(payload)\n\n\ndef create_access_token_for_app(app: \"App\", user: \"User\"):\n \"\"\"Create access token for app.\n\n App can use user's JWT token to proceed given operation in Saleor.\n The token which can be used by App has additional field defining the permissions\n assigned to it. The permissions set is the intersection of user permissions and\n app permissions.\n \"\"\"\n app_permissions = app.permissions.all()\n return _create_access_token_for_third_party_actions(\n permissions=app_permissions, user=user, app=app\n )\n\n\ndef create_access_token_for_app_extension(\n app_extension: \"AppExtension\",\n permissions: Iterable[\"Permission\"],\n user: \"User\",\n app: \"App\",\n):\n app_extension_id = graphene.Node.to_global_id(\"AppExtension\", app_extension.id)\n return _create_access_token_for_third_party_actions(\n permissions=permissions,\n user=user,\n app=app,\n extra={\"app_extension\": app_extension_id},\n )\n", "path": "saleor/core/jwt.py"}]}
| 3,186 | 575 |
gh_patches_debug_16950 | rasdani/github-patches | git_diff | pyg-team__pytorch_geometric-8016 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Strange behaviour with the to_dense_adj function
### 🐛 Describe the bug
While using to_dense_adj with edge attributes, I observed that the `idx` values generated are not unique (line 94 in to_dense_adj.py). As such, the scatter_add function sums up overlapping values, generating an output greater than the original range of edge_attr values.

The required tensors can be downloaded from [here](https://filesender.switch.ch/filesender2/download.php?token=d4b1599a-6eee-4b06-8640-be16fb784ab5&files_ids=490595)
Any help or insights are highly appreciated.
Thanks,
Chinmay
### Environment
* PyG version:2.3.1
* PyTorch version: 2.0.1+cu117
* OS: Ubuntu 20.04
* Python version:3.8.10
* CUDA/cuDNN version:11.7
* How you installed PyTorch and PyG (`conda`, `pip`, source):pip
* Any other relevant information (*e.g.*, version of `torch-scatter`):
--- END ISSUE ---
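As a quick illustration of the behaviour described in the issue, here is a minimal sketch; the duplicated edge and the attribute values are hypothetical, chosen only to make the summing visible, and it assumes `torch` and `torch_geometric` are importable:

```python
import torch
from torch_geometric.utils import to_dense_adj

# The edge (0 -> 1) appears twice, so both entries map to the same
# flattened idx and scatter(..., reduce='sum') adds their attributes.
edge_index = torch.tensor([[0, 0, 1],
                           [1, 1, 0]])
edge_attr = torch.tensor([1.0, 2.0, 0.5])

adj = to_dense_adj(edge_index, edge_attr=edge_attr)
print(adj)
# Expected output (roughly): tensor([[[0.0, 3.0],
#                                     [0.5, 0.0]]])
# 1.0 + 2.0 are summed into adj[0, 0, 1].
```

The 3.0 in the output exceeds every individual value in `edge_attr`, which matches the symptom reported above.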
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch_geometric/utils/to_dense_adj.py`
Content:
```
1 from typing import Optional
2
3 import torch
4 from torch import Tensor
5
6 from torch_geometric.typing import OptTensor
7 from torch_geometric.utils import cumsum, scatter
8
9
10 def to_dense_adj(
11 edge_index: Tensor,
12 batch: OptTensor = None,
13 edge_attr: OptTensor = None,
14 max_num_nodes: Optional[int] = None,
15 batch_size: Optional[int] = None,
16 ) -> Tensor:
17 r"""Converts batched sparse adjacency matrices given by edge indices and
18 edge attributes to a single dense batched adjacency matrix.
19
20 Args:
21 edge_index (LongTensor): The edge indices.
22 batch (LongTensor, optional): Batch vector
23 :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
24 node to a specific example. (default: :obj:`None`)
25 edge_attr (Tensor, optional): Edge weights or multi-dimensional edge
26 features. (default: :obj:`None`)
27 max_num_nodes (int, optional): The size of the output node dimension.
28 (default: :obj:`None`)
29 batch_size (int, optional) The batch size. (default: :obj:`None`)
30
31 :rtype: :class:`Tensor`
32
33 Examples:
34
35 >>> edge_index = torch.tensor([[0, 0, 1, 2, 3],
36 ... [0, 1, 0, 3, 0]])
37 >>> batch = torch.tensor([0, 0, 1, 1])
38 >>> to_dense_adj(edge_index, batch)
39 tensor([[[1., 1.],
40 [1., 0.]],
41 [[0., 1.],
42 [1., 0.]]])
43
44 >>> to_dense_adj(edge_index, batch, max_num_nodes=4)
45 tensor([[[1., 1., 0., 0.],
46 [1., 0., 0., 0.],
47 [0., 0., 0., 0.],
48 [0., 0., 0., 0.]],
49 [[0., 1., 0., 0.],
50 [1., 0., 0., 0.],
51 [0., 0., 0., 0.],
52 [0., 0., 0., 0.]]])
53
54 >>> edge_attr = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0])
55 >>> to_dense_adj(edge_index, batch, edge_attr)
56 tensor([[[1., 2.],
57 [3., 0.]],
58 [[0., 4.],
59 [5., 0.]]])
60 """
61 if batch is None:
62 num_nodes = int(edge_index.max()) + 1 if edge_index.numel() > 0 else 0
63 batch = edge_index.new_zeros(num_nodes)
64
65 if batch_size is None:
66 batch_size = int(batch.max()) + 1 if batch.numel() > 0 else 1
67
68 one = batch.new_ones(batch.size(0))
69 num_nodes = scatter(one, batch, dim=0, dim_size=batch_size, reduce='sum')
70 cum_nodes = cumsum(num_nodes)
71
72 idx0 = batch[edge_index[0]]
73 idx1 = edge_index[0] - cum_nodes[batch][edge_index[0]]
74 idx2 = edge_index[1] - cum_nodes[batch][edge_index[1]]
75
76 if max_num_nodes is None:
77 max_num_nodes = int(num_nodes.max())
78
79 elif ((idx1.numel() > 0 and idx1.max() >= max_num_nodes)
80 or (idx2.numel() > 0 and idx2.max() >= max_num_nodes)):
81 mask = (idx1 < max_num_nodes) & (idx2 < max_num_nodes)
82 idx0 = idx0[mask]
83 idx1 = idx1[mask]
84 idx2 = idx2[mask]
85 edge_attr = None if edge_attr is None else edge_attr[mask]
86
87 if edge_attr is None:
88 edge_attr = torch.ones(idx0.numel(), device=edge_index.device)
89
90 size = [batch_size, max_num_nodes, max_num_nodes]
91 size += list(edge_attr.size())[1:]
92 flattened_size = batch_size * max_num_nodes * max_num_nodes
93
94 idx = idx0 * max_num_nodes * max_num_nodes + idx1 * max_num_nodes + idx2
95 adj = scatter(edge_attr, idx, dim=0, dim_size=flattened_size, reduce='sum')
96 adj = adj.view(size)
97
98 return adj
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torch_geometric/utils/to_dense_adj.py b/torch_geometric/utils/to_dense_adj.py
--- a/torch_geometric/utils/to_dense_adj.py
+++ b/torch_geometric/utils/to_dense_adj.py
@@ -23,7 +23,10 @@
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
node to a specific example. (default: :obj:`None`)
edge_attr (Tensor, optional): Edge weights or multi-dimensional edge
- features. (default: :obj:`None`)
+ features.
+ If :obj:`edge_index` contains duplicated edges, the dense adjacency
+ matrix output holds the summed up entries of :obj:`edge_attr` for
+ duplicated edges. (default: :obj:`None`)
max_num_nodes (int, optional): The size of the output node dimension.
(default: :obj:`None`)
batch_size (int, optional) The batch size. (default: :obj:`None`)
|
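The accepted fix above documents the summing behaviour rather than changing it. If summing is not what a caller wants, one possible workaround (a sketch only, not part of the patch; it assumes the installed `torch_geometric` version exposes `coalesce` with a `reduce` argument) is to merge duplicated edges before the conversion:

```python
import torch
from torch_geometric.utils import coalesce, to_dense_adj

edge_index = torch.tensor([[0, 0, 1],
                           [1, 1, 0]])
edge_attr = torch.tensor([1.0, 2.0, 0.5])

# Merge duplicated edges first; 'mean' keeps the merged attribute within
# the original value range instead of letting to_dense_adj sum the copies.
edge_index, edge_attr = coalesce(edge_index, edge_attr, reduce='mean')
adj = to_dense_adj(edge_index, edge_attr=edge_attr)
```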
{"golden_diff": "diff --git a/torch_geometric/utils/to_dense_adj.py b/torch_geometric/utils/to_dense_adj.py\n--- a/torch_geometric/utils/to_dense_adj.py\n+++ b/torch_geometric/utils/to_dense_adj.py\n@@ -23,7 +23,10 @@\n :math:`\\mathbf{b} \\in {\\{ 0, \\ldots, B-1\\}}^N`, which assigns each\n node to a specific example. (default: :obj:`None`)\n edge_attr (Tensor, optional): Edge weights or multi-dimensional edge\n- features. (default: :obj:`None`)\n+ features.\n+ If :obj:`edge_index` contains duplicated edges, the dense adjacency\n+ matrix output holds the summed up entries of :obj:`edge_attr` for\n+ duplicated edges. (default: :obj:`None`)\n max_num_nodes (int, optional): The size of the output node dimension.\n (default: :obj:`None`)\n batch_size (int, optional) The batch size. (default: :obj:`None`)\n", "issue": "Strange behaviour with the to_dense_adj function\n### \ud83d\udc1b Describe the bug\n\nWhile using to_dense_adj with edge attributes, I observed that the `idx` values generated are not unique ((line 94 in to_dense_adj.py). As such, the scatter_add function sums up overlapping values and generating an output greater than the original range of edge_attr values.\r\n\r\n\r\n\r\nThe required tensors can be downloaded from [here](https://filesender.switch.ch/filesender2/download.php?token=d4b1599a-6eee-4b06-8640-be16fb784ab5&files_ids=490595)\r\n\r\nAny help or insights are highly appreciated. \r\n\r\nThanks,\r\nChinmay\n\n### Environment\n\n* PyG version:2.3.1\r\n* PyTorch version: 2.0.1+cu117\r\n* OS: Ubuntu 20.04\r\n* Python version:3.8.10\r\n* CUDA/cuDNN version:11.7\r\n* How you installed PyTorch and PyG (`conda`, `pip`, source):pip\r\n* Any other relevant information (*e.g.*, version of `torch-scatter`):\r\n\n", "before_files": [{"content": "from typing import Optional\n\nimport torch\nfrom torch import Tensor\n\nfrom torch_geometric.typing import OptTensor\nfrom torch_geometric.utils import cumsum, scatter\n\n\ndef to_dense_adj(\n edge_index: Tensor,\n batch: OptTensor = None,\n edge_attr: OptTensor = None,\n max_num_nodes: Optional[int] = None,\n batch_size: Optional[int] = None,\n) -> Tensor:\n r\"\"\"Converts batched sparse adjacency matrices given by edge indices and\n edge attributes to a single dense batched adjacency matrix.\n\n Args:\n edge_index (LongTensor): The edge indices.\n batch (LongTensor, optional): Batch vector\n :math:`\\mathbf{b} \\in {\\{ 0, \\ldots, B-1\\}}^N`, which assigns each\n node to a specific example. (default: :obj:`None`)\n edge_attr (Tensor, optional): Edge weights or multi-dimensional edge\n features. (default: :obj:`None`)\n max_num_nodes (int, optional): The size of the output node dimension.\n (default: :obj:`None`)\n batch_size (int, optional) The batch size. (default: :obj:`None`)\n\n :rtype: :class:`Tensor`\n\n Examples:\n\n >>> edge_index = torch.tensor([[0, 0, 1, 2, 3],\n ... 
[0, 1, 0, 3, 0]])\n >>> batch = torch.tensor([0, 0, 1, 1])\n >>> to_dense_adj(edge_index, batch)\n tensor([[[1., 1.],\n [1., 0.]],\n [[0., 1.],\n [1., 0.]]])\n\n >>> to_dense_adj(edge_index, batch, max_num_nodes=4)\n tensor([[[1., 1., 0., 0.],\n [1., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.]],\n [[0., 1., 0., 0.],\n [1., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.]]])\n\n >>> edge_attr = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0])\n >>> to_dense_adj(edge_index, batch, edge_attr)\n tensor([[[1., 2.],\n [3., 0.]],\n [[0., 4.],\n [5., 0.]]])\n \"\"\"\n if batch is None:\n num_nodes = int(edge_index.max()) + 1 if edge_index.numel() > 0 else 0\n batch = edge_index.new_zeros(num_nodes)\n\n if batch_size is None:\n batch_size = int(batch.max()) + 1 if batch.numel() > 0 else 1\n\n one = batch.new_ones(batch.size(0))\n num_nodes = scatter(one, batch, dim=0, dim_size=batch_size, reduce='sum')\n cum_nodes = cumsum(num_nodes)\n\n idx0 = batch[edge_index[0]]\n idx1 = edge_index[0] - cum_nodes[batch][edge_index[0]]\n idx2 = edge_index[1] - cum_nodes[batch][edge_index[1]]\n\n if max_num_nodes is None:\n max_num_nodes = int(num_nodes.max())\n\n elif ((idx1.numel() > 0 and idx1.max() >= max_num_nodes)\n or (idx2.numel() > 0 and idx2.max() >= max_num_nodes)):\n mask = (idx1 < max_num_nodes) & (idx2 < max_num_nodes)\n idx0 = idx0[mask]\n idx1 = idx1[mask]\n idx2 = idx2[mask]\n edge_attr = None if edge_attr is None else edge_attr[mask]\n\n if edge_attr is None:\n edge_attr = torch.ones(idx0.numel(), device=edge_index.device)\n\n size = [batch_size, max_num_nodes, max_num_nodes]\n size += list(edge_attr.size())[1:]\n flattened_size = batch_size * max_num_nodes * max_num_nodes\n\n idx = idx0 * max_num_nodes * max_num_nodes + idx1 * max_num_nodes + idx2\n adj = scatter(edge_attr, idx, dim=0, dim_size=flattened_size, reduce='sum')\n adj = adj.view(size)\n\n return adj\n", "path": "torch_geometric/utils/to_dense_adj.py"}], "after_files": [{"content": "from typing import Optional\n\nimport torch\nfrom torch import Tensor\n\nfrom torch_geometric.typing import OptTensor\nfrom torch_geometric.utils import cumsum, scatter\n\n\ndef to_dense_adj(\n edge_index: Tensor,\n batch: OptTensor = None,\n edge_attr: OptTensor = None,\n max_num_nodes: Optional[int] = None,\n batch_size: Optional[int] = None,\n) -> Tensor:\n r\"\"\"Converts batched sparse adjacency matrices given by edge indices and\n edge attributes to a single dense batched adjacency matrix.\n\n Args:\n edge_index (LongTensor): The edge indices.\n batch (LongTensor, optional): Batch vector\n :math:`\\mathbf{b} \\in {\\{ 0, \\ldots, B-1\\}}^N`, which assigns each\n node to a specific example. (default: :obj:`None`)\n edge_attr (Tensor, optional): Edge weights or multi-dimensional edge\n features.\n If :obj:`edge_index` contains duplicated edges, the dense adjacency\n matrix output holds the summed up entries of :obj:`edge_attr` for\n duplicated edges. (default: :obj:`None`)\n max_num_nodes (int, optional): The size of the output node dimension.\n (default: :obj:`None`)\n batch_size (int, optional) The batch size. (default: :obj:`None`)\n\n :rtype: :class:`Tensor`\n\n Examples:\n\n >>> edge_index = torch.tensor([[0, 0, 1, 2, 3],\n ... 
[0, 1, 0, 3, 0]])\n >>> batch = torch.tensor([0, 0, 1, 1])\n >>> to_dense_adj(edge_index, batch)\n tensor([[[1., 1.],\n [1., 0.]],\n [[0., 1.],\n [1., 0.]]])\n\n >>> to_dense_adj(edge_index, batch, max_num_nodes=4)\n tensor([[[1., 1., 0., 0.],\n [1., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.]],\n [[0., 1., 0., 0.],\n [1., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.]]])\n\n >>> edge_attr = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0])\n >>> to_dense_adj(edge_index, batch, edge_attr)\n tensor([[[1., 2.],\n [3., 0.]],\n [[0., 4.],\n [5., 0.]]])\n \"\"\"\n if batch is None:\n num_nodes = int(edge_index.max()) + 1 if edge_index.numel() > 0 else 0\n batch = edge_index.new_zeros(num_nodes)\n\n if batch_size is None:\n batch_size = int(batch.max()) + 1 if batch.numel() > 0 else 1\n\n one = batch.new_ones(batch.size(0))\n num_nodes = scatter(one, batch, dim=0, dim_size=batch_size, reduce='sum')\n cum_nodes = cumsum(num_nodes)\n\n idx0 = batch[edge_index[0]]\n idx1 = edge_index[0] - cum_nodes[batch][edge_index[0]]\n idx2 = edge_index[1] - cum_nodes[batch][edge_index[1]]\n\n if max_num_nodes is None:\n max_num_nodes = int(num_nodes.max())\n\n elif ((idx1.numel() > 0 and idx1.max() >= max_num_nodes)\n or (idx2.numel() > 0 and idx2.max() >= max_num_nodes)):\n mask = (idx1 < max_num_nodes) & (idx2 < max_num_nodes)\n idx0 = idx0[mask]\n idx1 = idx1[mask]\n idx2 = idx2[mask]\n edge_attr = None if edge_attr is None else edge_attr[mask]\n\n if edge_attr is None:\n edge_attr = torch.ones(idx0.numel(), device=edge_index.device)\n\n size = [batch_size, max_num_nodes, max_num_nodes]\n size += list(edge_attr.size())[1:]\n flattened_size = batch_size * max_num_nodes * max_num_nodes\n\n idx = idx0 * max_num_nodes * max_num_nodes + idx1 * max_num_nodes + idx2\n adj = scatter(edge_attr, idx, dim=0, dim_size=flattened_size, reduce='sum')\n adj = adj.view(size)\n\n return adj\n", "path": "torch_geometric/utils/to_dense_adj.py"}]}
| 1,779 | 234 |